bitkeeper revision 1.105.1.9 (3e67719fUVPJZo6pYtLZ12frkd1BqQ)
author kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Thu, 6 Mar 2003 16:04:47 +0000 (16:04 +0000)
committer kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Thu, 6 Mar 2003 16:04:47 +0000 (16:04 +0000)
xl_scsi.c, xl_ide.c, grok.c:
  new file
Many files:
  Steve's new SCSI world.

25 files changed:
.rootkeys
xen/Rules.mk
xen/drivers/block/genhd.c
xen/drivers/block/grok.c [new file with mode: 0644]
xen/drivers/block/xen_block.c
xen/drivers/ide/ide.c
xen/drivers/scsi/aacraid/aachba.c
xen/drivers/scsi/aacraid/aacraid.h
xen/drivers/scsi/aacraid/comminit.c
xen/drivers/scsi/aacraid/commsup.c
xen/drivers/scsi/aacraid/linit.c
xen/drivers/scsi/aacraid/rx.c
xen/drivers/scsi/scsi.c
xen/drivers/scsi/scsi.h
xen/drivers/scsi/scsi_error.c
xen/drivers/scsi/scsi_lib.c
xen/drivers/scsi/scsi_merge.c
xen/drivers/scsi/sd.c
xen/include/xeno/config.h
xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile
xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c [new file with mode: 0644]
xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c [new file with mode: 0644]
xenolinux-2.4.21-pre4-sparse/include/linux/major.h
xenolinux-2.4.21-pre4-sparse/init/do_mounts.c

index b72dac2fc58576175fc15476beb72414c9917ab3..f129d8995786e2807ac1f2ceab765e505edcd5f5 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79be04dyXzyXqDbMRS_1funwXQ xen/drivers/block/blkpg.c
 3ddb79beME_0abStePF6fU8XLuQnWw xen/drivers/block/elevator.c
 3ddb79beNQVrdGyoI4njXhgAjD6a4A xen/drivers/block/genhd.c
+3e677183FxihZVsJDCnvV2S0-FEZyA xen/drivers/block/grok.c
 3ddb79beyWwLRP_BiM2t1JKgr_plEw xen/drivers/block/ll_rw_blk.c
 3e4a8cb7RhubVgsPwO7cK0pgAN8WCQ xen/drivers/block/xen_block.c
 3e4a8cb7alzQCDKS7MlioPoHBKYkdQ xen/drivers/char/Makefile
 3e5a4e65iHEuC5sjFhj42XALYbLVRw xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile
 3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
 3e5a4e65GtI9JZRAjuRdXaxt_4ohyQ xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block_test.c
+3e677190SjkzJIvFifRVeYpIZOCtYA xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
+3e677193nOKKTLJzcAu4SYdbZaia8g xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
 3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/console/Makefile
 3e5a4e651TH-SXHoufurnWjgl5bfOA xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/console/console.c
 3e5a4e656nfFISThfbyXQOA6HN6YHw xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/dom0/Makefile
index 13a57ed550fa1bc067c3ce387154046cd8d9ed1b..d0feffd7f947e31eaa6bbd6fb5cb1f11cb996aa0 100644 (file)
@@ -20,7 +20,7 @@ ALL_OBJS += $(BASEDIR)/drivers/pci/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/net/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/block/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/ide/driver.o
-#ALL_OBJS += $(BASEDIR)/drivers/scsi/driver.o
+ALL_OBJS += $(BASEDIR)/drivers/scsi/driver.o
 ALL_OBJS += $(BASEDIR)/arch/$(ARCH)/arch.o
 
 HOSTCC     = gcc
index 427c2cb312835081785bf36fe98e17d979f19e68..89d892ad475b0372061f260520aad97fcb61a13a 100644 (file)
@@ -193,6 +193,67 @@ out:
 }
 #endif
 
+/* XXX SMH: stuff from fs/partitions dumped here temporarily */
+
+
+/*
+ * This function will re-read the partition tables for a given device,
+ * and set things back up again.  There are some important caveats,
+ * however.  You must ensure that no one is using the device, and no one
+ * can start using the device while this function is being executed.
+ *
+ * Much of the cleanup from the old partition tables should have already been
+ * done
+ */
+void register_disk(struct gendisk *gdev, kdev_t dev, unsigned minors,
+    struct block_device_operations *ops, long size)
+{
+    if (!gdev)
+        return;
+    grok_partitions(gdev, MINOR(dev)>>gdev->minor_shift, minors, size);
+}
+
+void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size)
+{
+       int i;
+       int first_minor = drive << dev->minor_shift;
+       int end_minor   = first_minor + dev->max_p;
+
+       if(!dev->sizes)
+               blk_size[dev->major] = NULL;
+
+       dev->part[first_minor].nr_sects = size;
+#ifdef DEVFS_MUST_DIE
+       /* No such device or no minors to use for partitions */
+       if ( !size && dev->flags && (dev->flags[drive] & GENHD_FL_REMOVABLE) )
+               devfs_register_partitions (dev, first_minor, 0);
+#endif
+
+       if (!size || minors == 1)
+               return;
+
+       if (dev->sizes) {
+               dev->sizes[first_minor] = size >> (BLOCK_SIZE_BITS - 9);
+               for (i = first_minor + 1; i < end_minor; i++)
+                       dev->sizes[i] = 0;
+       }
+       blk_size[dev->major] = dev->sizes;
+#if 0
+       /* XXX SMH: don't actually check partition details yet */
+       check_partition(dev, MKDEV(dev->major, first_minor), 1 + first_minor);
+#endif
+
+       /*
+        * We need to set the sizes array before we will be able to access
+        * any of the partitions on this device.
+        */
+       if (dev->sizes != NULL) {       /* optional safeguard in ll_rw_blk.c */
+               for (i = first_minor; i < end_minor; i++)
+                       dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+       }
+}
+
+
 
 extern int blk_dev_init(void);
 extern int net_dev_init(void);
diff --git a/xen/drivers/block/grok.c b/xen/drivers/block/grok.c
new file mode 100644 (file)
index 0000000..94f6df8
--- /dev/null
@@ -0,0 +1,39 @@
+void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size)
+{
+       int i;
+       int first_minor = drive << dev->minor_shift;
+       int end_minor   = first_minor + dev->max_p;
+
+       if(!dev->sizes)
+               blk_size[dev->major] = NULL;
+
+       dev->part[first_minor].nr_sects = size;
+#ifdef DEVFS_MUST_DIE
+       /* No such device or no minors to use for partitions */
+       if ( !size && dev->flags && (dev->flags[drive] & GENHD_FL_REMOVABLE) )
+               devfs_register_partitions (dev, first_minor, 0);
+#endif
+
+       if (!size || minors == 1)
+               return;
+
+       if (dev->sizes) {
+               dev->sizes[first_minor] = size >> (BLOCK_SIZE_BITS - 9);
+               for (i = first_minor + 1; i < end_minor; i++)
+                       dev->sizes[i] = 0;
+       }
+       blk_size[dev->major] = dev->sizes;
+#if 0
+       /* XXX SMH: don't actually check partition details yet */
+       check_partition(dev, MKDEV(dev->major, first_minor), 1 + first_minor);
+#endif
+
+       /*
+        * We need to set the sizes array before we will be able to access
+        * any of the partitions on this device.
+        */
+       if (dev->sizes != NULL) {       /* optional safeguard in ll_rw_blk.c */
+               for (i = first_minor; i < end_minor; i++)
+                       dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+       }
+}
index 805fd9e1ae75bb48365a934887a895ab9c7f4bf2..f73f647fc49a3f4b05fb404844157e85dc8e13b2 100644 (file)
@@ -223,11 +223,18 @@ static void dispatch_debug_block_io(struct task_struct *p, int index)
 static void dispatch_probe_block_io(struct task_struct *p, int index)
 {
     extern void ide_probe_devices(xen_disk_info_t *xdi);
+    extern void scsi_probe_devices(xen_disk_info_t *xdi);
     blk_ring_t *blk_ring = p->blk_ring_base;
     xen_disk_info_t *xdi;
 
     xdi = phys_to_virt((unsigned long)blk_ring->ring[index].req.buffer);    
+
+    /* 
+    ** SMH: by convention we first probe IDE, then SCSI; the latter
    ** appends per-device info to the end of the xdi structure. 
+    */
     ide_probe_devices(xdi);
+    scsi_probe_devices(xdi); 
 
     make_response(p, blk_ring->ring[index].req.id, 0);
 }
@@ -309,6 +316,7 @@ static void dispatch_rw_block_io(struct task_struct *p, int index)
     bh->b_blocknr       = blk_ring->ring[index].req.block_number;
     bh->b_size          = size;
     bh->b_dev           = blk_ring->ring[index].req.device; 
+
     bh->b_rsector       = blk_ring->ring[index].req.sector_number;
     bh->b_data          = phys_to_virt(buffer);
     bh->b_count.counter = 1;
index 1db4e34834bf67beb18bbc5563c382c908df1998..84edfa28d6c4539c9cee38dfa2035da8105db248 100644 (file)
@@ -183,12 +183,6 @@ int unregister_blkdev(unsigned int major, const char * name) { return 0; }
 int invalidate_device(kdev_t dev, int do_sync) { return 0; }
 /* fs/buffer.c... */
 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) { }
-/* fs/partitions/check.c... */
-void grok_partitions(struct gendisk *dev, int drive, 
-                     unsigned minors, long size) { }
-void register_disk(struct gendisk *dev, kdev_t first, 
-                   unsigned minors, struct block_device_operations *ops, 
-                   long size) { }
 /* fs/devices.c... */
 const char * kdevname(kdev_t dev) { return NULL; }
 /* End of XXXXXX region */
index 21fc4259b87a4302846cf2d68583b227c1518f4d..198e37cace5392a6dbd80eb6a84cac3b62ae7107 100644 (file)
@@ -253,7 +253,6 @@ int aac_get_containers(struct aac_dev *dev)
        dinfo->count = cpu_to_le32(index);
        dinfo->type = cpu_to_le32(FT_FILESYS);
 
-       printk("aac_get_container: getting info for container %d\n", index); 
        status = fib_send(ContainerCommand,
                          fibptr,
                          sizeof (struct aac_query_mount),
index 1f9838436d7bb9ba8897fc21a1e6b19064120fc5..ce6566bcc17b629479dbe53cb8bbbdb80967be8a 100644 (file)
@@ -1410,7 +1410,7 @@ unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
 #ifdef TRY_TASKLET
 extern struct tasklet_struct aac_command_tasklet;
-int aac_command_thread(unsigned long data);
+void aac_command_thread(unsigned long data);
 #else
 int aac_command_thread(struct aac_dev * dev);
 #endif
index 29a3dba28e0e8ca659cc8c11823bb08b27348cb7..b4681a473b47578ebee3e2a4dd9b81e3b66d2931 100644 (file)
@@ -325,10 +325,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
        }
        memset(dev->queues, 0, sizeof(struct aac_queue_block));
 
-       printk("aac_init_adapater, dev is %p\n", dev); 
        if (aac_comm_init(dev)<0)
                return NULL;
-       printk("aac_init_adapater, dev->init is %p\n", dev->init); 
+
        /*
         *      Initialize the list of fibs
         */
index 7d84ad241cf46dfbfe28b7f3e8bcf2a28dac7e5b..a1fabe7b19aa1707ba41d187f413160c1f360d1a 100644 (file)
@@ -535,12 +535,13 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority
          * do_softirq() after scheduling the tasklet, as long as we
          * are _sure_ we hold no locks here...
          */
-       printk("about to softirq aac_command_thread...\n"); 
+//     printk("about to softirq aac_command_thread...\n"); 
        while (!fibptr->done) { 
             tasklet_schedule(&aac_command_tasklet);
-           mdelay(100); 
+           do_softirq(); /* force execution */
+//         mdelay(100); 
        }
-       printk("back from softirq cmd thread and fibptr->done!\n"); 
+//     printk("back from softirq cmd thread and fibptr->done!\n"); 
 #else 
        printk("about to bail at aac_command_thread...\n"); 
        while (!fibptr->done) { 
@@ -843,11 +844,12 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
  */
  
 #ifndef TRY_TASKLET
-DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
 int aac_command_thread(struct aac_dev * dev)
 {
 #else
-int aac_command_thread(unsigned long data)
+DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
+void aac_command_thread(unsigned long data)
+#define return(_x) return 
 {   
     struct aac_dev *dev = (struct aac_dev *)data; 
 #endif
@@ -863,9 +865,8 @@ int aac_command_thread(unsigned long data)
     /*
      * We can only have one thread per adapter for AIF's.
      */
-    printk("aac_command_'thread': entered.\n"); 
     if (dev->aif_thread)
-       return -EINVAL;
+       return(-EINVAL);
 
 #if 0
     /*
@@ -888,9 +889,7 @@ int aac_command_thread(unsigned long data)
 //    while(1) 
     {
 
-       printk("aac_command_thread: in 'loop'\n"); 
        spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
-       printk("flags = %x\n", flags); 
        while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
            struct list_head *entry;
            struct aac_aifcmd * aifcmd;
@@ -905,7 +904,6 @@ int aac_command_thread(unsigned long data)
                        
            spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock,flags);
            fib = list_entry(entry, struct hw_fib, header.FibLinks);
-           printk("aac_command_thread: got fib \n"); 
            /*
             *  We will process the FIB here or pass it to a 
             *  worker thread that is TBD. We Really can't 
@@ -923,7 +921,6 @@ int aac_command_thread(unsigned long data)
             */
            aifcmd = (struct aac_aifcmd *) fib->data;
            if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
-               printk("aac_command_thread: handling aif... :-( \n"); 
                aac_handle_aif(dev, &fibptr);
            } else {
                /* The u32 here is important and intended. We are using
@@ -1024,5 +1021,5 @@ int aac_command_thread(unsigned long data)
     dev->aif_thread = 0;
 
 #endif
-    return 0;
+    return(0);
 }
index b5026d9065d799d5df6118a74c1866988bf12d3e..9c3cffda809c6e30e0da77fa73aff62b9b485aac 100644 (file)
@@ -338,7 +338,6 @@ static int aac_detect(Scsi_Host_Template *template)
 #endif
 
     template->present = aac_count; /* # of cards of this type found */
-    printk(KERN_DEBUG "aac_detect: returning %d\n", aac_count); 
     return aac_count;
 }
 
@@ -461,7 +460,7 @@ static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
     struct buffer_head * buf;
     
     dprintk((KERN_DEBUG "aac_biosparm.\n"));
-    
+
     /*
      * Assuming extended translation is enabled - #REVISIT#
      */
@@ -499,7 +498,6 @@ static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
      * table entry whose end_head matches one of the standard geometry 
      * translations ( 64/32, 128/32, 255/63 ).
      */
-#endif
 
         
     if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
@@ -560,7 +558,6 @@ static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
                     param->heads, param->sectors));
        }
     }
-#if 0
     brelse(buf);
 #endif
     return 0;
index e79ad49e742cfe98c080be2b6397458155eeb437..8d4685ba0f0fc6141ff64d4d7108e63a3853a10f 100644 (file)
@@ -311,7 +311,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
     struct aac_init *init;
     
     init = dev->init;
-    printk("aac_rx_start: dev is %p, init is %p\n", dev, init); 
+
     init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
     /*
      * Tell the adapter we are back and up and running so it will scan
index 85a59f54ac9a64971412d05ef7c15facb0187157..46ddcb77b13f00674d95f2efab05cee6a1d34a50 100644 (file)
 /*#include <xeno/smp_lock.h>*/
 /*#include <xeno/completion.h>*/
 
+/* for xeno scsi_probe() stuff... maybe punt somewhere else? */
+#include <hypervisor-ifs/block.h>
+#include <xeno/blkdev.h>
+
 #define __KERNEL_SYSCALLS__
 
 /*#include <xeno/unistd.h>*/
@@ -240,7 +244,14 @@ static void scsi_wait_done(Scsi_Cmnd * SCpnt)
     if (req->waiting != NULL) {
         complete(req->waiting);
     }
+#else 
+    /* XXX SMH: just use a flag to signal completion; caller spins */
+    if (*(int *)(req->waiting) != 0) {
+//        printk("scsi_wait_done: flipping wait status on req %p\n", req); 
+        *(int *)(req->waiting) = 0; 
+    }
 #endif
+
 }
 
 /*
@@ -317,15 +328,8 @@ void scsi_release_request(Scsi_Request * req)
 {
     if( req->sr_command != NULL )
     {
-#ifdef SMH_DEBUG
-        printk("scsi_release_request: req->sr_command = %p\n", 
-                   req->sr_command); 
-#endif
         scsi_release_command(req->sr_command);
         req->sr_command = NULL;
-#ifdef SMHHACK 
-        req->freeaddr = 0x1234; 
-#endif
     }
     
     kfree(req);
@@ -361,225 +365,227 @@ void scsi_release_request(Scsi_Request * req)
 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait, 
                                 int interruptable)
 {
-       struct Scsi_Host *host;
-       Scsi_Cmnd *SCpnt = NULL;
-       Scsi_Device *SDpnt;
-       unsigned long flags;
-  
-       if (!device)
-               panic("No device passed to scsi_allocate_device().\n");
-  
-       host = device->host;
-  
-       spin_lock_irqsave(&device_request_lock, flags);
-       while (1 == 1) {
-               SCpnt = NULL;
-               if (!device->device_blocked) {
-                       if (device->single_lun) {
-                               /*
-                                * FIXME(eric) - this is not at all optimal.  Given that
-                                * single lun devices are rare and usually slow
-                                * (i.e. CD changers), this is good enough for now, but
-                                * we may want to come back and optimize this later.
-                                *
-                                * Scan through all of the devices attached to this
-                                * host, and see if any are active or not.  If so,
-                                * we need to defer this command.
-                                *
-                                * We really need a busy counter per device.  This would
-                                * allow us to more easily figure out whether we should
-                                * do anything here or not.
-                                */
-                               for (SDpnt = host->host_queue;
-                                    SDpnt;
-                                    SDpnt = SDpnt->next) {
-                                       /*
-                                        * Only look for other devices on the same bus
-                                        * with the same target ID.
-                                        */
-                                       if (SDpnt->channel != device->channel
-                                           || SDpnt->id != device->id
-                                           || SDpnt == device) {
-                                               continue;
-                                       }
-                                        if( atomic_read(&SDpnt->device_active) != 0)
-                                        {
-                                                break;
-                                        }
-                               }
-                               if (SDpnt) {
-                                       /*
-                                        * Some other device in this cluster is busy.
-                                        * If asked to wait, we need to wait, otherwise
-                                        * return NULL.
-                                        */
-                                       SCpnt = NULL;
-                                       goto busy;
-                               }
-                       }
-                       /*
-                        * Now we can check for a free command block for this device.
-                        */
-                       for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
-                               if (SCpnt->request.rq_status == RQ_INACTIVE)
-                                       break;
-                       }
-               }
-               /*
-                * If we couldn't find a free command block, and we have been
-                * asked to wait, then do so.
-                */
-               if (SCpnt) {
-                       break;
-               }
-      busy:
-               /*
-                * If we have been asked to wait for a free block, then
-                * wait here.
-                */
-               if (wait) {
-                    printk("XXX smh: scsi cannot wait for free cmd block.\n"); 
-                    BUG(); 
+    struct Scsi_Host *host;
+    Scsi_Cmnd *SCpnt = NULL;
+    Scsi_Device *SDpnt;
+    unsigned long flags;
+    
+    if (!device)
+        panic("No device passed to scsi_allocate_device().\n");
+    
+    host = device->host;
+    
+    spin_lock_irqsave(&device_request_lock, flags);
+    
+    while (1 == 1) {
+        SCpnt = NULL;
+        if (!device->device_blocked) {
+            if (device->single_lun) {
+                /*
+                 * FIXME(eric) - this is not at all optimal.  Given that
+                 * single lun devices are rare and usually slow
+                 * (i.e. CD changers), this is good enough for now, but
+                 * we may want to come back and optimize this later.
+                 *
+                 * Scan through all of the devices attached to this
+                 * host, and see if any are active or not.  If so,
+                 * we need to defer this command.
+                 *
+                 * We really need a busy counter per device.  This would
+                 * allow us to more easily figure out whether we should
+                 * do anything here or not.
+                 */
+                for (SDpnt = host->host_queue;
+                     SDpnt;
+                     SDpnt = SDpnt->next) {
+                    /*
+                     * Only look for other devices on the same bus
+                     * with the same target ID.
+                     */
+                    if (SDpnt->channel != device->channel
+                        || SDpnt->id != device->id
+                        || SDpnt == device) {
+                        continue;
+                    }
+                    if( atomic_read(&SDpnt->device_active) != 0)
+                    {
+                        break;
+                    }
+                }
+                if (SDpnt) {
+                    /*
+                     * Some other device in this cluster is busy.
+                     * If asked to wait, we need to wait, otherwise
+                     * return NULL.
+                     */
+                    SCpnt = NULL;
+                    goto busy;
+                }
+            }
+            /*
+             * Now we can check for a free command block for this device.
+             */
+            for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
+                if (SCpnt->request.rq_status == RQ_INACTIVE)
+                    break;
+            }
+        }
+        /*
+         * If we couldn't find a free command block, and we have been
+         * asked to wait, then do so.
+         */
+        if (SCpnt) {
+            break;
+        }
+    busy:
+        /*
+         * If we have been asked to wait for a free block, then
+         * wait here.
+         */
+        if (wait) {
+            printk("XXX smh: scsi cannot wait for free cmd block.\n"); 
+            BUG(); 
 #if 0 
-                        DECLARE_WAITQUEUE(wait, current);
-
-                        /*
-                         * We need to wait for a free commandblock.  We need to
-                         * insert ourselves into the list before we release the
-                         * lock.  This way if a block were released the same
-                         * microsecond that we released the lock, the call
-                         * to schedule() wouldn't block (well, it might switch,
-                         * but the current task will still be schedulable.
-                         */
-                        add_wait_queue(&device->scpnt_wait, &wait);
-                        if( interruptable ) {
-                                set_current_state(TASK_INTERRUPTIBLE);
-                        } else {
-                                set_current_state(TASK_UNINTERRUPTIBLE);
-                        }
-
-                        spin_unlock_irqrestore(&device_request_lock, flags);
-
-                       /*
-                        * This should block until a device command block
-                        * becomes available.
-                        */
-                        schedule();
-
-                       spin_lock_irqsave(&device_request_lock, flags);
-
-                        remove_wait_queue(&device->scpnt_wait, &wait);
-                        /*
-                         * FIXME - Isn't this redundant??  Someone
-                         * else will have forced the state back to running.
-                         */
-                        set_current_state(TASK_RUNNING);
-                        /*
-                         * In the event that a signal has arrived that we need
-                         * to consider, then simply return NULL.  Everyone
-                         * that calls us should be prepared for this
-                         * possibility, and pass the appropriate code back
-                         * to the user.
-                         */
-                        if( interruptable ) {
-                                if (signal_pending(current)) {
-                                        spin_unlock_irqrestore(&device_request_lock, flags);
-                                        return NULL;
-                                }
-                        }
+            DECLARE_WAITQUEUE(wait, current);
+            
+            /*
+             * We need to wait for a free commandblock.  We need to
+             * insert ourselves into the list before we release the
+             * lock.  This way if a block were released the same
+             * microsecond that we released the lock, the call
+             * to schedule() wouldn't block (well, it might switch,
+             * but the current task will still be schedulable.
+             */
+            add_wait_queue(&device->scpnt_wait, &wait);
+            if( interruptable ) {
+                set_current_state(TASK_INTERRUPTIBLE);
+            } else {
+                set_current_state(TASK_UNINTERRUPTIBLE);
+            }
+            
+            spin_unlock_irqrestore(&device_request_lock, flags);
+            
+            /*
+             * This should block until a device command block
+             * becomes available.
+             */
+            schedule();
+            
+            spin_lock_irqsave(&device_request_lock, flags);
+            
+            remove_wait_queue(&device->scpnt_wait, &wait);
+            /*
+             * FIXME - Isn't this redundant??  Someone
+             * else will have forced the state back to running.
+             */
+            set_current_state(TASK_RUNNING);
+            /*
+             * In the event that a signal has arrived that we need
+             * to consider, then simply return NULL.  Everyone
+             * that calls us should be prepared for this
+             * possibility, and pass the appropriate code back
+             * to the user.
+             */
+            if( interruptable ) {
+                if (signal_pending(current)) {
+                    spin_unlock_irqrestore(&device_request_lock, flags);
+                    return NULL;
+                }
+            }
 #endif
-               } else {
-                        spin_unlock_irqrestore(&device_request_lock, flags);
-                       return NULL;
-               }
-       }
-
-       SCpnt->request.rq_status = RQ_SCSI_BUSY;
-       SCpnt->request.waiting = NULL;  /* And no one is waiting for this
+        } else {
+            spin_unlock_irqrestore(&device_request_lock, flags);
+            return NULL;
+        }
+    }
+    
+    SCpnt->request.rq_status = RQ_SCSI_BUSY;
+    SCpnt->request.waiting = NULL;     /* And no one is waiting for this
                                         * to complete */
-       atomic_inc(&SCpnt->host->host_active);
-       atomic_inc(&SCpnt->device->device_active);
-
-       SCpnt->buffer  = NULL;
-       SCpnt->bufflen = 0;
-       SCpnt->request_buffer = NULL;
-       SCpnt->request_bufflen = 0;
-
-       SCpnt->use_sg = 0;      /* Reset the scatter-gather flag */
-       SCpnt->old_use_sg = 0;
-       SCpnt->transfersize = 0;        /* No default transfer size */
-       SCpnt->cmd_len = 0;
-
-       SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
-       SCpnt->sc_request = NULL;
-       SCpnt->sc_magic = SCSI_CMND_MAGIC;
-
-        SCpnt->result = 0;
-       SCpnt->underflow = 0;   /* Do not flag underflow conditions */
-       SCpnt->old_underflow = 0;
-       SCpnt->resid = 0;
-       SCpnt->state = SCSI_STATE_INITIALIZING;
-       SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
-
-       spin_unlock_irqrestore(&device_request_lock, flags);
-
-       SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
-                                  SCpnt->target,
-                               atomic_read(&SCpnt->host->host_active)));
-
-       return SCpnt;
+    atomic_inc(&SCpnt->host->host_active);
+    atomic_inc(&SCpnt->device->device_active);
+    
+    SCpnt->buffer  = NULL;
+    SCpnt->bufflen = 0;
+    SCpnt->request_buffer = NULL;
+    SCpnt->request_bufflen = 0;
+    
+    SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+    SCpnt->old_use_sg = 0;
+    SCpnt->transfersize = 0;   /* No default transfer size */
+    SCpnt->cmd_len = 0;
+    
+    SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
+    SCpnt->sc_request = NULL;
+    SCpnt->sc_magic = SCSI_CMND_MAGIC;
+    
+    SCpnt->result = 0;
+    SCpnt->underflow = 0;      /* Do not flag underflow conditions */
+    SCpnt->old_underflow = 0;
+    SCpnt->resid = 0;
+    SCpnt->state = SCSI_STATE_INITIALIZING;
+    SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
+    
+    spin_unlock_irqrestore(&device_request_lock, flags);
+    
+    SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
+                               SCpnt->target,
+                               atomic_read(&SCpnt->host->host_active)));
+    
+    return SCpnt;
 }
 
 inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
 {
-       unsigned long flags;
-        Scsi_Device * SDpnt;
-
-       spin_lock_irqsave(&device_request_lock, flags);
-
-        SDpnt = SCpnt->device;
-
-       SCpnt->request.rq_status = RQ_INACTIVE;
-       SCpnt->state = SCSI_STATE_UNUSED;
-       SCpnt->owner = SCSI_OWNER_NOBODY;
-       atomic_dec(&SCpnt->host->host_active);
-       atomic_dec(&SDpnt->device_active);
-
-       SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
-                                  SCpnt->target,
-                                  atomic_read(&SCpnt->host->host_active),
-                                  SCpnt->host->host_failed));
-       if (SCpnt->host->host_failed != 0) {
-               SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
-                                               SCpnt->host->in_recovery,
-                                               SCpnt->host->eh_active));
-       }
-       /*
-        * If the host is having troubles, then look to see if this was the last
-        * command that might have failed.  If so, wake up the error handler.
-        */
-       if (SCpnt->host->in_recovery
-           && !SCpnt->host->eh_active
-           && SCpnt->host->host_busy == SCpnt->host->host_failed) {
-               SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
-                            atomic_read(&SCpnt->host->eh_wait->count)));
+    unsigned long flags;
+    Scsi_Device * SDpnt;
+    
+    spin_lock_irqsave(&device_request_lock, flags);
+    
+    SDpnt = SCpnt->device;
+    
+    SCpnt->request.rq_status = RQ_INACTIVE;
+    SCpnt->state = SCSI_STATE_UNUSED;
+    SCpnt->owner = SCSI_OWNER_NOBODY;
+    atomic_dec(&SCpnt->host->host_active);
+    atomic_dec(&SDpnt->device_active);
+    
+    SCSI_LOG_MLQUEUE(5, printk(
+        "Deactivating command for device %d (active=%d, failed=%d)\n",
+        SCpnt->target,
+        atomic_read(&SCpnt->host->host_active),
+        SCpnt->host->host_failed));
+    if (SCpnt->host->host_failed != 0) {
+        SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
+                                          SCpnt->host->in_recovery,
+                                          SCpnt->host->eh_active));
+    }
+    /*
+     * If the host is having troubles, then look to see if this was the last
+     * command that might have failed.  If so, wake up the error handler.
+     */
+    if (SCpnt->host->in_recovery
+        && !SCpnt->host->eh_active
+        && SCpnt->host->host_busy == SCpnt->host->host_failed) {
 #if 0
-               up(SCpnt->host->eh_wait);
+        SCSI_LOG_ERROR_RECOVERY(5, printk(
+            "Waking error handler thread (%d)\n",
+            atomic_read(&SCpnt->host->eh_wait->count)));
+        up(SCpnt->host->eh_wait);
 #endif
-       }
-
-       spin_unlock_irqrestore(&device_request_lock, flags);
-
+    }
+    
+    spin_unlock_irqrestore(&device_request_lock, flags);
+    
 #if 0
-        /*
-         * Wake up anyone waiting for this device.  Do this after we
-         * have released the lock, as they will need it as soon as
-         * they wake up.  
-         */
-       wake_up(&SDpnt->scpnt_wait);
+    /*
+     * Wake up anyone waiting for this device.  Do this after we
+     * have released the lock, as they will need it as soon as
+     * they wake up.  
+     */
+    wake_up(&SDpnt->scpnt_wait);
 #endif
-
+    
 }
 
 /*
@@ -611,21 +617,21 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
  */
 void scsi_release_command(Scsi_Cmnd * SCpnt)
 {
-        request_queue_t *q;
-        Scsi_Device * SDpnt;
-
-        SDpnt = SCpnt->device;
-
-        __scsi_release_command(SCpnt);
-
-        /*
-         * Finally, hit the queue request function to make sure that
-         * the device is actually busy if there are requests present.
-         * This won't block - if the device cannot take any more, life
-         * will go on.  
-         */
-        q = &SDpnt->request_queue;
-        scsi_queue_next_request(q, NULL);                
+    request_queue_t *q;
+    Scsi_Device * SDpnt;
+    
+    SDpnt = SCpnt->device;
+    
+    __scsi_release_command(SCpnt);
+    
+    /*
+     * Finally, hit the queue request function to make sure that
+     * the device is actually busy if there are requests present.
+     * This won't block - if the device cannot take any more, life
+     * will go on.  
+     */
+    q = &SDpnt->request_queue;
+    scsi_queue_next_request(q, NULL);                
 }
 
 /*
@@ -640,153 +646,158 @@ void scsi_release_command(Scsi_Cmnd * SCpnt)
 int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
 {
 #ifdef DEBUG_DELAY
-       unsigned long clock;
+    unsigned long clock;
 #endif
-       struct Scsi_Host *host;
-       int rtn = 0;
-       unsigned long flags = 0;
-       unsigned long timeout;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
+    struct Scsi_Host *host;
+    int rtn = 0;
+    unsigned long flags = 0;
+    unsigned long timeout;
+    
+    ASSERT_LOCK(&io_request_lock, 0);
+    
 #if DEBUG
-       unsigned long *ret = 0;
+    unsigned long *ret = 0;
 #ifdef __mips__
-       __asm__ __volatile__("move\t%0,$31":"=r"(ret));
+    __asm__ __volatile__("move\t%0,$31":"=r"(ret));
 #else
-       ret = __builtin_return_address(0);
+    ret = __builtin_return_address(0);
 #endif
 #endif
-
-       host = SCpnt->host;
-
-       /* Assign a unique nonzero serial_number. */
-       if (++serial_number == 0)
-               serial_number = 1;
-       SCpnt->serial_number = serial_number;
-       SCpnt->pid = scsi_pid++;
-
-       /*
-        * We will wait MIN_RESET_DELAY clock ticks after the last reset so
-        * we can avoid the drive not being ready.
-        */
-       timeout = host->last_reset + MIN_RESET_DELAY;
-
-       if (host->resetting && time_before(jiffies, timeout)) {
-               int ticks_remaining = timeout - jiffies;
-               /*
-                * NOTE: This may be executed from within an interrupt
-                * handler!  This is bad, but for now, it'll do.  The irq
-                * level of the interrupt handler has been masked out by the
-                * platform dependent interrupt handling code already, so the
-                * sti() here will not cause another call to the SCSI host's
-                * interrupt handler (assuming there is one irq-level per
-                * host).
-                */
-               while (--ticks_remaining >= 0)
-                       mdelay(1 + 999 / HZ);
-               host->resetting = 0;
-       }
-       if (host->hostt->use_new_eh_code) {
-               scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
-       } else {
+    
+    host = SCpnt->host;
+    
+    /* Assign a unique nonzero serial_number. */
+    if (++serial_number == 0)
+        serial_number = 1;
+    SCpnt->serial_number = serial_number;
+    SCpnt->pid = scsi_pid++;
+    
+    /*
+     * We will wait MIN_RESET_DELAY clock ticks after the last reset so
+     * we can avoid the drive not being ready.
+     */
+    timeout = host->last_reset + MIN_RESET_DELAY;
+    
+    if (host->resetting && time_before(jiffies, timeout)) {
+        int ticks_remaining = timeout - jiffies;
+        /*
+         * NOTE: This may be executed from within an interrupt
+         * handler!  This is bad, but for now, it'll do.  The irq
+         * level of the interrupt handler has been masked out by the
+         * platform dependent interrupt handling code already, so the
+         * sti() here will not cause another call to the SCSI host's
+         * interrupt handler (assuming there is one irq-level per
+         * host).
+         */
+        while (--ticks_remaining >= 0)
+            mdelay(1 + 999 / HZ);
+        host->resetting = 0;
+    }
+    if (host->hostt->use_new_eh_code) {
+        scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
+    } else {
 #if 0
-               scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
-                              scsi_old_times_out);
+        scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
+                       scsi_old_times_out);
 #endif
-       }
-
-       /*
-        * We will use a queued command if possible, otherwise we will emulate the
-        * queuing and calling of completion function ourselves.
-        */
-       SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
-              "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
-       SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
-                           SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
-
-       SCpnt->state = SCSI_STATE_QUEUED;
-       SCpnt->owner = SCSI_OWNER_LOWLEVEL;
-       if (host->can_queue) {
-               SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
-                                          host->hostt->queuecommand));
-               /*
-                * Use the old error handling code if we haven't converted the driver
-                * to use the new one yet.  Note - only the new queuecommand variant
-                * passes a meaningful return value.
-                */
-               if (host->hostt->use_new_eh_code) {
-                       /*
-                        * Before we queue this command, check if the command
-                        * length exceeds what the host adapter can handle.
-                        */
-                       if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
-                               spin_lock_irqsave(&io_request_lock, flags);
-                               rtn = host->hostt->queuecommand(SCpnt, scsi_done);
-                               spin_unlock_irqrestore(&io_request_lock, flags);
-                               if (rtn != 0) {
-                                       scsi_delete_timer(SCpnt);
-                                       scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
-                                       SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));                                
-                               }
-                       } else {
-                               SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
-                               SCpnt->result = (DID_ABORT << 16);
-                               spin_lock_irqsave(&io_request_lock, flags);
-                               scsi_done(SCpnt);
-                               spin_unlock_irqrestore(&io_request_lock, flags);
-                               rtn = 1;
-                       }
-               } else {
-                       /*
-                        * Before we queue this command, check if the command
-                        * length exceeds what the host adapter can handle.
-                        */
+    }
+    
+    /*
+     * We will use a queued command if possible, otherwise we will emulate the
+     * queuing and calling of completion function ourselves.
+     */
+    SCSI_LOG_MLQUEUE(3, printk(
+        "scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
+        "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
+        SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
+        SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
+    
+    SCpnt->state = SCSI_STATE_QUEUED;
+    SCpnt->owner = SCSI_OWNER_LOWLEVEL;
+    if (host->can_queue) {
+        SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
+                                   host->hostt->queuecommand));
+        /*
+         * Use the old error handling code if we haven't converted the driver
+         * to use the new one yet.  Note - only the new queuecommand variant
+         * passes a meaningful return value.
+         */
+        if (host->hostt->use_new_eh_code) {
+            /*
+             * Before we queue this command, check if the command
+             * length exceeds what the host adapter can handle.
+             */
+            if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
+                spin_lock_irqsave(&io_request_lock, flags);
+                rtn = host->hostt->queuecommand(SCpnt, scsi_done);
+                spin_unlock_irqrestore(&io_request_lock, flags);
+                if (rtn != 0) {
+                    scsi_delete_timer(SCpnt);
+                    scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
+                    SCSI_LOG_MLQUEUE(3, printk(
+                        "queuecommand : request rejected\n")); 
+                }
+            } else {
+                SCSI_LOG_MLQUEUE(3, printk(
+                    "queuecommand : command too long.\n"));
+                SCpnt->result = (DID_ABORT << 16);
+                spin_lock_irqsave(&io_request_lock, flags);
+                scsi_done(SCpnt);
+                spin_unlock_irqrestore(&io_request_lock, flags);
+                rtn = 1;
+            }
+        } else {
+            /*
+             * Before we queue this command, check if the command
+             * length exceeds what the host adapter can handle.
+             */
 #if 0
-                    if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
-                               spin_lock_irqsave(&io_request_lock, flags);
-                               host->hostt->queuecommand(SCpnt, scsi_old_done);
-                               spin_unlock_irqrestore(&io_request_lock, flags);
-                       } else {
-                               SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
-                               SCpnt->result = (DID_ABORT << 16);
-                               spin_lock_irqsave(&io_request_lock, flags);
-                               scsi_old_done(SCpnt);
-                               spin_unlock_irqrestore(&io_request_lock, flags);
-                               rtn = 1;
-                       }
-#endif
-
-               }
-       } else {
-               int temp;
-
-               SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
+            if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
                 spin_lock_irqsave(&io_request_lock, flags);
-               temp = host->hostt->command(SCpnt);
-               SCpnt->result = temp;
-#ifdef DEBUG_DELAY
+                host->hostt->queuecommand(SCpnt, scsi_old_done);
                 spin_unlock_irqrestore(&io_request_lock, flags);
-               clock = jiffies + 4 * HZ;
-               while (time_before(jiffies, clock)) {
-                       barrier();
-                       cpu_relax();
-               }
-               printk("done(host = %d, result = %04x) : routine at %p\n",
-                      host->host_no, temp, host->hostt->command);
+            } else {
+                SCSI_LOG_MLQUEUE(3, printk(
+                    "queuecommand : command too long.\n"));
+                SCpnt->result = (DID_ABORT << 16);
                 spin_lock_irqsave(&io_request_lock, flags);
+                scsi_old_done(SCpnt);
+                spin_unlock_irqrestore(&io_request_lock, flags);
+                rtn = 1;
+            }
 #endif
-               if (host->hostt->use_new_eh_code) {
-                       scsi_done(SCpnt);
-               } else {
+            
+        }
+    } else {
+        int temp;
+        
+        SCSI_LOG_MLQUEUE(3, printk(
+            "command() :  routine at %p\n", host->hostt->command));
+        spin_lock_irqsave(&io_request_lock, flags);
+        temp = host->hostt->command(SCpnt);
+        SCpnt->result = temp;
+#ifdef DEBUG_DELAY
+        spin_unlock_irqrestore(&io_request_lock, flags);
+        clock = jiffies + 4 * HZ;
+        while (time_before(jiffies, clock)) {
+            barrier();
+            cpu_relax();
+        }
+        printk("done(host = %d, result = %04x) : routine at %p\n",
+               host->host_no, temp, host->hostt->command);
+        spin_lock_irqsave(&io_request_lock, flags);
+#endif
+        if (host->hostt->use_new_eh_code) {
+            scsi_done(SCpnt);
+        } else {
 #if 0
-                       scsi_old_done(SCpnt);
+            scsi_old_done(SCpnt);
 #endif
-               }
-                spin_unlock_irqrestore(&io_request_lock, flags);
-       }
-       SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
-       return rtn;
+        }
+        spin_unlock_irqrestore(&io_request_lock, flags);
+    }
+    SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
+    return rtn;
 }
 
 #ifdef DEVFS_MUST_DIE
@@ -806,6 +817,8 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
 {
 #if 0
     DECLARE_COMPLETION(wait);
+#else 
+    int wait = 1; 
 #endif
 
 
@@ -813,6 +826,8 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
     
 #if 0
     SRpnt->sr_request.waiting = &wait;
+#else 
+    SRpnt->sr_request.waiting = (void *)&wait; 
 #endif
 
 
@@ -824,28 +839,31 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
 
 #if 0
     wait_for_completion(&wait);
-#endif
+    SRpnt->sr_request.waiting = NULL;
+#else 
 
     /* XXX SMH: in 'standard' driver we think everythings ok here since
        we've waited on &wait -- hence we deallocate the command structure
        if it hasn't been done already. This is not the correct behaviour 
        in xen ... hmm .. how to fix? */
-    mdelay(500); 
-
+    int usecs = 0; 
+//    printk("scsi_wait_req: about to poll-wait, request is at %p\n", 
+//           SRpnt->sr_request); 
+    while(*(int *)(SRpnt->sr_request.waiting)) {
+        udelay(500); 
+        usecs += 500; 
+        if(usecs > 1000000) {
+            printk("scsi_wait_req: giving up after 1 seconds!\n"); 
+            *(int *)(SRpnt->sr_request.waiting) = 0; 
+        } 
+    }
+#endif
 
-    SRpnt->sr_request.waiting = NULL;
 
     if( SRpnt->sr_command != NULL )
     {
-#ifdef SMH_DEBUG
-        printk("scsi_wait_req: releasing SRpnt->sr_command = %p\n", 
-               SRpnt->sr_command); 
-#endif
         scsi_release_command(SRpnt->sr_command);
         SRpnt->sr_command = NULL;
-#ifdef SMHHACK 
-        SRpnt->freeaddr = 0x99991234; 
-#endif
     }
     
 }
@@ -880,87 +898,77 @@ void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
              void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
                 int timeout, int retries)
 {
-       Scsi_Device * SDpnt = SRpnt->sr_device;
-       struct Scsi_Host *host = SDpnt->host;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       SCSI_LOG_MLQUEUE(4,
-                        {
-                        int i;
-                        int target = SDpnt->id;
-                        int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
-                        printk("scsi_do_req (host = %d, channel = %d target = %d, "
-                   "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
-                               "retries = %d)\n"
-                               "command : ", host->host_no, SDpnt->channel, target, buffer,
-                               bufflen, done, timeout, retries);
-                        for (i  = 0; i < size; ++i)
-                               printk("%02x  ", ((unsigned char *) cmnd)[i]);
-                               printk("\n");
-                        });
-
-       if (!host) {
-               panic("Invalid or not present host.\n");
-       }
-
-       /*
-        * If the upper level driver is reusing these things, then
-        * we should release the low-level block now.  Another one will
-        * be allocated later when this request is getting queued.
-        */
-       if( SRpnt->sr_command != NULL )
-       { 
-#ifdef SMH_DEBUG
-           printk("scsi_do_req: releasing SRpnt->sr_command = %p\n", 
-                   SRpnt->sr_command); 
-#endif
-               scsi_release_command(SRpnt->sr_command);
-               SRpnt->sr_command = NULL;
-#ifdef SMHHACK
-                SRpnt->freeaddr = 0xabbadead;
-#endif
-       }
-
-       /*
-        * We must prevent reentrancy to the lowlevel host driver.
-        * This prevents it - we enter a loop until the host we want
-        * to talk to is not busy.  Race conditions are prevented, as
-        * interrupts are disabled in between the time we check for
-        * the host being not busy, and the time we mark it busy
-        * ourselves.  */
-
-
-       /*
-        * Our own function scsi_done (which marks the host as not
-        * busy, disables the timeout counter, etc) will be called by
-        * us or by the scsi_hosts[host].queuecommand() function needs
-        * to also call the completion function for the high level
-        * driver.  */
-
-       memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd, 
-              sizeof(SRpnt->sr_cmnd));
-#ifdef SMHHACK
-        SRpnt->freeaddr = 0x1111; 
-#endif
-
-       SRpnt->sr_bufflen = bufflen;
-       SRpnt->sr_buffer = buffer;
-       SRpnt->sr_allowed = retries;
-       SRpnt->sr_done = done;
-       SRpnt->sr_timeout_per_command = timeout;
-
-       if (SRpnt->sr_cmd_len == 0)
-               SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
-
-       /*
-        * At this point, we merely set up the command, stick it in the normal
-        * request queue, and return.  Eventually that request will come to the
-        * top of the list, and will be dispatched.
-        */
-       scsi_insert_special_req(SRpnt, 0);
-
-       SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
+    Scsi_Device * SDpnt = SRpnt->sr_device;
+    struct Scsi_Host *host = SDpnt->host;
+    
+    ASSERT_LOCK(&io_request_lock, 0);
+    
+    SCSI_LOG_MLQUEUE(4,
+    {
+        int i;
+        int target = SDpnt->id;
+        int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
+        printk("scsi_do_req (host = %d, channel = %d target = %d, "
+               "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
+               "retries = %d)\n"
+               "command : ", host->host_no, SDpnt->channel, target, buffer,
+               bufflen, done, timeout, retries);
+        for (i  = 0; i < size; ++i)
+            printk("%02x  ", ((unsigned char *) cmnd)[i]);
+        printk("\n");
+    });
+    
+    if (!host) {
+        panic("Invalid or not present host.\n");
+    }
+    
+    /*
+     * If the upper level driver is reusing these things, then
+     * we should release the low-level block now.  Another one will
+     * be allocated later when this request is getting queued.
+     */
+    if( SRpnt->sr_command != NULL )
+    { 
+        scsi_release_command(SRpnt->sr_command);
+        SRpnt->sr_command = NULL;
+    }
+    
+    /*
+     * We must prevent reentrancy to the lowlevel host driver.
+     * This prevents it - we enter a loop until the host we want
+     * to talk to is not busy.  Race conditions are prevented, as
+     * interrupts are disabled in between the time we check for
+     * the host being not busy, and the time we mark it busy
+     * ourselves.  */
+    
+    
+    /*
+     * Our own function scsi_done (which marks the host as not
+     * busy, disables the timeout counter, etc) will be called by
+     * us or by the scsi_hosts[host].queuecommand() function needs
+     * to also call the completion function for the high level
+     * driver.  */
+    
+    memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd, 
+           sizeof(SRpnt->sr_cmnd));
+    
+    SRpnt->sr_bufflen = bufflen;
+    SRpnt->sr_buffer = buffer;
+    SRpnt->sr_allowed = retries;
+    SRpnt->sr_done = done;
+    SRpnt->sr_timeout_per_command = timeout;
+    
+    if (SRpnt->sr_cmd_len == 0)
+        SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
+    
+    /*
+     * At this point, we merely set up the command, stick it in the normal
+     * request queue, and return.  Eventually that request will come to the
+     * top of the list, and will be dispatched.
+     */
+    scsi_insert_special_req(SRpnt, 0);
+    
+    SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
 }
  
 /*
@@ -995,10 +1003,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
 
        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
        SRpnt->sr_command = SCpnt;
-#ifdef SMH_DEBUG
-        printk("scsi_init_cmd_from_req: SRpnt = %p, SRpnt->sr_command = %p\n", 
-               SRpnt, SRpnt->sr_command); 
-#endif        
 
        if (!host) {
                panic("Invalid or not present host.\n");
@@ -1381,10 +1385,10 @@ void scsi_bottom_half_handler(void)
                      * see if this was the last command that might
                      * have failed.  If so, wake up the error handler.  */
                     if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
+#if 0
                         SCSI_LOG_ERROR_RECOVERY(5, printk(
                             "Waking error handler thread (%d)\n",
                             atomic_read(&SCpnt->host->eh_wait->count)));
-#if 0
                         up(SCpnt->host->eh_wait);
 #endif
                     }
@@ -2696,8 +2700,6 @@ int __init scsi_setup(char *str)
 __setup("scsihosts=", scsi_setup);
 #endif
 
-static spinlock_t slock2 = SPIN_LOCK_UNLOCKED; 
-
 static int __init init_scsi(void)
 {
 #ifdef CONFIG_PROC_FS
@@ -2706,14 +2708,6 @@ static int __init init_scsi(void)
 
        printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
 
-        {
-            unsigned long flags; 
-            
-            spin_lock_irqsave(&slock2, flags); 
-            spin_unlock_irqrestore(&slock2, flags); 
-            printk("SCSI start of day -- flags = %lx\n", flags); 
-        }
-
         if( scsi_init_minimal_dma_pool() != 0 )
         {
                 return 1;
@@ -2749,18 +2743,11 @@ static int __init init_scsi(void)
         */
        init_bh(SCSI_BH, scsi_bottom_half_handler);
 
-        {
-            unsigned long flags; 
-            
-            spin_lock_irqsave(&slock2, flags); 
-            spin_unlock_irqrestore(&slock2, flags); 
-            printk("SCSI end of day -- flags = %lx\n", flags); 
-        }
-
-
        return 0;
 }
 
+
+
 static void __exit exit_scsi(void)
 {
        Scsi_Host_Name *shn, *shn2 = NULL;
@@ -2973,6 +2960,7 @@ scsi_reset_provider(Scsi_Device *dev, int flag)
                rtn = scsi_old_reset(SCpnt, flag);
                spin_unlock_irqrestore(&io_request_lock, flags);
 #endif
+                rtn= 0; 
        }
 
        scsi_delete_timer(SCpnt);
index 338bca8f7b9e4758d756db8bbe0b1fc1bed270ca..fc39615fc75135a7e5c7cbcd8e0cd903494b7c51 100644 (file)
@@ -633,18 +633,18 @@ struct scsi_device {
  * with low level drivers that support multiple outstanding commands.
  */
 typedef struct scsi_pointer {
-       char *ptr;              /* data pointer */
-       int this_residual;      /* left in this buffer */
-       struct scatterlist *buffer;     /* which buffer */
-       int buffers_residual;   /* how many buffers left */
-
-        dma_addr_t dma_handle;
-
-       volatile int Status;
-       volatile int Message;
-       volatile int have_data_in;
-       volatile int sent_command;
-       volatile int phase;
+    char *ptr;         /* data pointer */
+    int this_residual; /* left in this buffer */
+    struct scatterlist *buffer;        /* which buffer */
+    int buffers_residual;      /* how many buffers left */
+    
+    dma_addr_t dma_handle;
+    
+    volatile int Status;
+    volatile int Message;
+    volatile int have_data_in;
+    volatile int sent_command;
+    volatile int phase;
 } Scsi_Pointer;
 
 /*
@@ -685,146 +685,149 @@ struct scsi_request {
 };
 
 /*
- * FIXME(eric) - one of the great regrets that I have is that I failed to define
- * these structure elements as something like sc_foo instead of foo.  This would
- * make it so much easier to grep through sources and so forth.  I propose that
- * all new elements that get added to these structures follow this convention.
- * As time goes on and as people have the stomach for it, it should be possible to 
- * go back and retrofit at least some of the elements here with with the prefix.
- */
+ * FIXME(eric) - one of the great regrets that I have is that I failed
+ * to define these structure elements as something like sc_foo instead
+ * of foo.  This would make it so much easier to grep through sources
+ * and so forth.  I propose that all new elements that get added to
+ * these structures follow this convention.  As time goes on and as
+ * people have the stomach for it, it should be possible to go back
+ * and retrofit at least some of the elements here with the
+ * prefix.  
+*/
+
 struct scsi_cmnd {
-       int     sc_magic;
+    int     sc_magic;
 /* private: */
-       /*
-        * This information is private to the scsi mid-layer.  Wrapping it in a
-        * struct private is a way of marking it in a sort of C++ type of way.
-        */
-       struct Scsi_Host *host;
-       unsigned short state;
-       unsigned short owner;
-       Scsi_Device *device;
-       Scsi_Request *sc_request;
-       struct scsi_cmnd *next;
-       struct scsi_cmnd *reset_chain;
-
-       int eh_state;           /* Used for state tracking in error handlr */
-       void (*done) (struct scsi_cmnd *);      /* Mid-level done function */
-       /*
-          A SCSI Command is assigned a nonzero serial_number when internal_cmnd
-          passes it to the driver's queue command function.  The serial_number
-          is cleared when scsi_done is entered indicating that the command has
-          been completed.  If a timeout occurs, the serial number at the moment
-          of timeout is copied into serial_number_at_timeout.  By subsequently
-          comparing the serial_number and serial_number_at_timeout fields
-          during abort or reset processing, we can detect whether the command
-          has already completed.  This also detects cases where the command has
-          completed and the SCSI Command structure has already being reused
-          for another command, so that we can avoid incorrectly aborting or
-          resetting the new command.
-        */
-
-       unsigned long serial_number;
-       unsigned long serial_number_at_timeout;
-
-       int retries;
-       int allowed;
-       int timeout_per_command;
-       int timeout_total;
-       int timeout;
-
-       /*
-        * We handle the timeout differently if it happens when a reset, 
-        * abort, etc are in process. 
-        */
-       unsigned volatile char internal_timeout;
-       struct scsi_cmnd *bh_next;      /* To enumerate the commands waiting 
-                                          to be processed. */
-
+    /*
+     * This information is private to the scsi mid-layer.  Wrapping it in a
+     * struct private is a way of marking it in a sort of C++ type of way.
+     */
+    struct Scsi_Host *host;
+    unsigned short state;
+    unsigned short owner;
+    Scsi_Device *device;
+    Scsi_Request *sc_request;
+    struct scsi_cmnd *next;
+    struct scsi_cmnd *reset_chain;
+    
+    int eh_state;              /* Used for state tracking in error handler */
+    void (*done) (struct scsi_cmnd *); /* Mid-level done function */
+    /*
+      A SCSI Command is assigned a nonzero serial_number when internal_cmnd
+      passes it to the driver's queue command function.  The serial_number
+      is cleared when scsi_done is entered indicating that the command has
+      been completed.  If a timeout occurs, the serial number at the moment
+      of timeout is copied into serial_number_at_timeout.  By subsequently
+      comparing the serial_number and serial_number_at_timeout fields
+      during abort or reset processing, we can detect whether the command
+      has already completed.  This also detects cases where the command has
+      completed and the SCSI Command structure has already being reused
+      for another command, so that we can avoid incorrectly aborting or
+      resetting the new command.
+    */
+    
+    unsigned long serial_number;
+    unsigned long serial_number_at_timeout;
+
+    int retries;
+    int allowed;
+    int timeout_per_command;
+    int timeout_total;
+    int timeout;
+    
+    /*
+     * We handle the timeout differently if it happens when a reset, 
+     * abort, etc are in process. 
+     */
+    unsigned volatile char internal_timeout;
+    struct scsi_cmnd *bh_next; /* To enumerate the commands waiting 
+                                   to be processed. */
+    
 /* public: */
-
-       unsigned int target;
-       unsigned int lun;
-       unsigned int channel;
-       unsigned char cmd_len;
-       unsigned char old_cmd_len;
-       unsigned char sc_data_direction;
-       unsigned char sc_old_data_direction;
-
-       /* These elements define the operation we are about to perform */
-       unsigned char cmnd[MAX_COMMAND_SIZE];
-       unsigned request_bufflen;       /* Actual request size */
-
-       struct timer_list eh_timeout;   /* Used to time out the command. */
-       void *request_buffer;           /* Actual requested buffer */
-        void **bounce_buffers;         /* Array of bounce buffers when using scatter-gather */
-
-       /* These elements define the operation we ultimately want to perform */
-       unsigned char data_cmnd[MAX_COMMAND_SIZE];
-       unsigned short old_use_sg;      /* We save  use_sg here when requesting
-                                        * sense info */
-       unsigned short use_sg;  /* Number of pieces of scatter-gather */
-       unsigned short sglist_len;      /* size of malloc'd scatter-gather list */
-       unsigned short abort_reason;    /* If the mid-level code requests an
+    
+    unsigned int target;
+    unsigned int lun;
+    unsigned int channel;
+    unsigned char cmd_len;
+    unsigned char old_cmd_len;
+    unsigned char sc_data_direction;
+    unsigned char sc_old_data_direction;
+    
+    /* These elements define the operation we are about to perform */
+    unsigned char cmnd[MAX_COMMAND_SIZE];
+    unsigned request_bufflen;  /* Actual request size */
+    
+    struct timer_list eh_timeout; /* Used to time out the command. */
+    void *request_buffer;  /* Actual requested buffer */
+    void **bounce_buffers; /* Array of bounce buffers when 
+                              using scatter-gather */
+    
+    /* These elements define the operation we ultimately want to perform */
+    unsigned char data_cmnd[MAX_COMMAND_SIZE];
+    unsigned short old_use_sg; /* We save  use_sg here when requesting
+                                 * sense info */
+    unsigned short use_sg;     /* Number of pieces of scatter-gather */
+    unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+    unsigned short abort_reason;       /* If the mid-level code requests an
                                         * abort, this is the reason. */
-       unsigned bufflen;       /* Size of data buffer */
-       void *buffer;           /* Data buffer */
-
-       unsigned underflow;     /* Return error if less than
-                                  this amount is transferred */
-       unsigned old_underflow; /* save underflow here when reusing the
+    unsigned bufflen;  /* Size of data buffer */
+    void *buffer;              /* Data buffer */
+    
+    unsigned underflow;        /* Return error if less than
+                           this amount is transferred */
+    unsigned old_underflow;    /* save underflow here when reusing the
                                 * command for error handling */
-
-       unsigned transfersize;  /* How much we are guaranteed to
+    
+    unsigned transfersize;     /* How much we are guaranteed to
                                   transfer with each SCSI transfer
                                   (ie, between disconnect / 
                                   reconnects.   Probably == sector
                                   size */
-
-       int resid;              /* Number of bytes requested to be
-                                  transferred less actual number
-                                  transferred (0 if not supported) */
-
-       struct request request; /* A copy of the command we are
+    
+    int resid;         /* Number of bytes requested to be
+                           transferred less actual number
+                           transferred (0 if not supported) */
+    
+    struct request request;    /* A copy of the command we are
                                   working on */
+    
+    unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];         
+    /* obtained by REQUEST SENSE when CHECK CONDITION is
+     * received on original command (auto-sense) */
 
-       unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];              /* obtained by REQUEST SENSE
-                                                * when CHECK CONDITION is
-                                                * received on original command 
-                                                * (auto-sense) */
-
-       unsigned flags;
-
-       /*
-        * Used to indicate that a command which has timed out also
-        * completed normally.  Typically the completion function will
-        * do nothing but set this flag in this instance because the
-        * timeout handler is already running.
-        */
-       unsigned done_late:1;
-
-       /* Low-level done function - can be used by low-level driver to point
-        *        to completion function.  Not used by mid/upper level code. */
-       void (*scsi_done) (struct scsi_cmnd *);
-
-       /*
-        * The following fields can be written to by the host specific code. 
-        * Everything else should be left alone. 
-        */
-
-       Scsi_Pointer SCp;       /* Scratchpad used by some host adapters */
-
-       unsigned char *host_scribble;   /* The host adapter is allowed to
-                                          * call scsi_malloc and get some memory
-                                          * and hang it here.     The host adapter
-                                          * is also expected to call scsi_free
-                                          * to release this memory.  (The memory
-                                          * obtained by scsi_malloc is guaranteed
-                                          * to be at an address < 16Mb). */
+    unsigned flags;
+    
+    /*
+     * Used to indicate that a command which has timed out also
+     * completed normally.  Typically the completion function will
+     * do nothing but set this flag in this instance because the
+     * timeout handler is already running.
+     */
+    unsigned done_late:1;
+    
+    /* Low-level done function - can be used by low-level driver to point
+     *        to completion function.  Not used by mid/upper level code. */
+    void (*scsi_done) (struct scsi_cmnd *);
+    
+    /*
+     * The following fields can be written to by the host specific code. 
+     * Everything else should be left alone. 
+     */
+    
+    Scsi_Pointer SCp;  /* Scratchpad used by some host adapters */
+    
+    unsigned char *host_scribble;      
 
-       int result;             /* Status code from lower level driver */
+    /* The host adapter is allowed to call scsi_malloc and get some
+     * memory and hang it here.  The host adapter is also expected to
+     * call scsi_free to release this memory.  (The memory obtained
+     * by scsi_malloc is guaranteed to be at an address < 16Mb). */
 
-       unsigned char tag;      /* SCSI-II queued command tag */
-       unsigned long pid;      /* Process ID, starts at 0 */
+    
+    int result;                /* Status code from lower level driver */
+    
+    unsigned char tag; /* SCSI-II queued command tag */
+    unsigned long pid; /* Process ID, starts at 0 */
 };
 
 /*
index 6c043937bed929ea311813fd347401ff5ecd500e..1287b9a99bd9c74d3d29d82d625e0872d3cb32bd 100644 (file)
@@ -1914,9 +1914,9 @@ void scsi_error_handler(void *data)
        /*
         * Wake up the thread that created us.
         */
+#if 0
        SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent %d\n", host->eh_notify->count.counter));
 
-#if 0
        up(host->eh_notify);
 #endif
 
index 8c32bf547fd784ab52ae574d79a03a6f41bfc47b..57798389fc49066d7baa7f39ecc914f69bb7c473 100644 (file)
@@ -73,7 +73,7 @@ static void __scsi_insert_special(request_queue_t *q, struct request *rq,
     unsigned long flags;
     
     ASSERT_LOCK(&io_request_lock, 0);
-    
+
     rq->cmd = SPECIAL;
     rq->special = data;
     rq->q = NULL;
@@ -362,92 +362,98 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
                                     int requeue,
                                     int frequeue)
 {
-       struct request *req;
-       struct buffer_head *bh;
-        Scsi_Device * SDpnt;
-       int nsect;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       req = &SCpnt->request;
-       req->errors = 0;
-       if (!uptodate) {
-               printk(" I/O error: dev %s, sector %lu\n",
-                      kdevname(req->rq_dev), req->sector);
-       }
-       do {
-               if ((bh = req->bh) != NULL) {
-                       nsect = bh->b_size >> 9;
-                       blk_finished_io(nsect);
-                       req->bh = bh->b_reqnext;
-                       bh->b_reqnext = NULL;
-                       sectors -= nsect;
-                       bh->b_end_io(bh, uptodate);
-                       if ((bh = req->bh) != NULL) {
-                               req->hard_sector += nsect;
-                               req->hard_nr_sectors -= nsect;
-                               req->sector += nsect;
-                               req->nr_sectors -= nsect;
-
-                               req->current_nr_sectors = bh->b_size >> 9;
-                               if (req->nr_sectors < req->current_nr_sectors) {
-                                       req->nr_sectors = req->current_nr_sectors;
-                                       printk("scsi_end_request: buffer-list destroyed\n");
-                               }
-                       }
-               }
-       } while (sectors && bh);
-
-       /*
-        * If there are blocks left over at the end, set up the command
-        * to queue the remainder of them.
-        */
-       if (req->bh) {
-                request_queue_t *q;
-
-               if( !requeue )
-               {
-                       return SCpnt;
+    struct request *req;
+    struct buffer_head *bh;
+    Scsi_Device * SDpnt;
+    int nsect;
+    
+    ASSERT_LOCK(&io_request_lock, 0);
+    
+    req = &SCpnt->request;
+    req->errors = 0;
+    if (!uptodate) {
+       printk(" I/O error: dev %s, sector %lu\n",
+              kdevname(req->rq_dev), req->sector);
+    }
+    do {
+       if ((bh = req->bh) != NULL) {
+           nsect = bh->b_size >> 9;
+           blk_finished_io(nsect);
+           req->bh = bh->b_reqnext;
+           bh->b_reqnext = NULL;
+           sectors -= nsect;
+           bh->b_end_io(bh, uptodate);
+           if ((bh = req->bh) != NULL) {
+               req->hard_sector += nsect;
+               req->hard_nr_sectors -= nsect;
+               req->sector += nsect;
+               req->nr_sectors -= nsect;
+               
+               req->current_nr_sectors = bh->b_size >> 9;
+               if (req->nr_sectors < req->current_nr_sectors) {
+                   req->nr_sectors = req->current_nr_sectors;
+                   printk("scsi_end_request: buffer-list destroyed\n");
                }
-
-                q = &SCpnt->device->request_queue;
-
-               req->buffer = bh->b_data;
-               /*
-                * Bleah.  Leftovers again.  Stick the leftovers in
-                * the front of the queue, and goose the queue again.
-                */
-               scsi_queue_next_request(q, SCpnt);
-               return SCpnt;
+           }
        }
-#if 0
-       /*
-        * This request is done.  If there is someone blocked waiting for this
-        * request, wake them up.  Typically used to wake up processes trying
-        * to swap a page into memory.
-        */
-       if (req->waiting != NULL) {
-               complete(req->waiting);
+    } while (sectors && bh);
+    
+    /*
+     * If there are blocks left over at the end, set up the command
+     * to queue the remainder of them.
+     */
+    if (req->bh) {
+       request_queue_t *q;
+       
+       if( !requeue )
+       {
+           return SCpnt;
        }
-#endif
-       req_finished_io(req);
-       add_blkdev_randomness(MAJOR(req->rq_dev));
-
-        SDpnt = SCpnt->device;
-
+       
+       q = &SCpnt->device->request_queue;
+       
+       req->buffer = bh->b_data;
        /*
-        * This will goose the queue request function at the end, so we don't
-        * need to worry about launching another command.
+        * Bleah.  Leftovers again.  Stick the leftovers in
+        * the front of the queue, and goose the queue again.
         */
-       __scsi_release_command(SCpnt);
-
-       if( frequeue ) {
-               request_queue_t *q;
-
-               q = &SDpnt->request_queue;
-               scsi_queue_next_request(q, NULL);                
-       }
-       return NULL;
+       scsi_queue_next_request(q, SCpnt);
+       return SCpnt;
+    }
+#if 0
+    /*
+     * This request is done.  If there is someone blocked waiting for this
+     * request, wake them up.  Typically used to wake up processes trying
+     * to swap a page into memory.
+     */
+    if (req->waiting != NULL) {
+       complete(req->waiting);
+    }
+#else 
+    /* XXX SMH: we're done -- flip the flag for the spinner :-| */
+    if(req->waiting && (*(int *)(req->waiting) != NULL)) {
+               printk("__scsi_end_request: flipping wait status on req %p\n", req); 
+               *(int *)(req->waiting) = NULL; 
+    } // else printk("__scsi_end_request: no-one to notify!!\n"); 
+#endif
+    req_finished_io(req);
+    add_blkdev_randomness(MAJOR(req->rq_dev));
+    
+    SDpnt = SCpnt->device;
+    
+    /*
+     * This will goose the queue request function at the end, so we don't
+     * need to worry about launching another command.
+     */
+    __scsi_release_command(SCpnt);
+    
+    if( frequeue ) {
+       request_queue_t *q;
+       
+       q = &SDpnt->request_queue;
+       scsi_queue_next_request(q, NULL);                
+    }
+    return NULL;
 }
 
 /*
@@ -554,6 +560,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
        int this_count = SCpnt->bufflen >> 9;
        request_queue_t *q = &SCpnt->device->request_queue;
 
+       // printk("scsi_io_completion entered.\n"); 
        /*
         * We must do one of several things here:
         *
@@ -1053,6 +1060,7 @@ void scsi_request_fn(request_queue_t * q)
                         * get those allocated here.  
                         */
                        if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+                               printk("scsi_request_fn: scsi_init_io_fn failed :-(\n"); 
                                SCpnt = __scsi_end_request(SCpnt, 0, 
                                                           SCpnt->request.nr_sectors, 0, 0);
                                if( SCpnt != NULL )
@@ -1068,6 +1076,7 @@ void scsi_request_fn(request_queue_t * q)
                         * Initialize the actual SCSI command for this request.
                         */
                        if (!STpnt->init_command(SCpnt)) {
+                               printk("scsi_request_fn: init_command failed :-(\n"); 
                                scsi_release_buffers(SCpnt);
                                SCpnt = __scsi_end_request(SCpnt, 0, 
                                                           SCpnt->request.nr_sectors, 0, 0);
index 92306b3ec0a1cda18ec97cc85b2db0ab22644897..4ea7f7e57877322d782c8d30cf09f739eb1bc88a 100644 (file)
@@ -802,6 +802,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
        int                  this_count;
        void               ** bbpnt;
 
+       // printk("scsi_merge.c: __init_io entered\n"); 
        /*
         * FIXME(eric) - don't inline this - it doesn't depend on the
         * integer flags.   Come to think of it, I don't think this is even
index dbb69d24478cb2a0f0122c62ab3ac63d664d2ef2..4c64afd27eb504c30be1e5c09540efb6240dcdf4 100644 (file)
@@ -61,6 +61,8 @@
 
 #include <xeno/genhd.h>
 
+#include <asm/domain_page.h>    /* SMH: for [un_]map_domain_mem() */
+
 /*
  *  static const char RCSid[] = "$Header:";
  */
@@ -324,6 +326,7 @@ static int sd_init_command(Scsi_Cmnd * SCpnt)
        !dpnt->device ||
        !dpnt->device->online ||
        block + SCpnt->request.nr_sectors > ppnt->nr_sects) {
+
        SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 
                                   SCpnt->request.nr_sectors));
        SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
@@ -1132,8 +1135,8 @@ static int sd_init()
        sd_registered++;
     }
     /* We do not support attaching loadable devices yet. */
-    if (rscsi_disks)
-       return 0;
+    if (rscsi_disks) 
+       return 0; 
 
     rscsi_disks = kmalloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
     if (!rscsi_disks)
@@ -1296,9 +1299,73 @@ static void sd_finish()
     }
 #endif
 
+#if 0
+       /* XXX SMH: turn on some logging */
+       scsi_logging_level = ~0;
+       SCSI_SET_LOGGING(SCSI_LOG_HLQUEUE_SHIFT, SCSI_LOG_HLQUEUE_BITS, 1); 
+#endif
+
     return;
 }
 
+
+/* 
+** XXX SMH: gross 'probe' function to allow xeno world to grope us; 
+** this should really not be in the disk-specific code as it should
+** report tapes, CDs, etc. But for now this looks like the easiest 
+** place to hook it in :-( 
+*/
+void scsi_probe_devices(xen_disk_info_t *xdi)
+{
+    Scsi_Disk *sd; 
+    int i, base, diskinfo[4];
+    xen_disk_info_t *xen_xdi = 
+       (xen_disk_info_t *)map_domain_mem(virt_to_phys(xdi));
+
+    /* We've already had IDE probe => we need to append our info */
+    base = xen_xdi->count; 
+
+    for (sd = rscsi_disks, i = 0; i < sd_template.dev_max; i++, sd++) {
+
+        if (sd->device !=NULL) { 
+
+           xen_xdi->disks[xen_xdi->count].type = XEN_DISK_SCSI; 
+           xen_xdi->disks[xen_xdi->count].capacity = sd->capacity; 
+           xen_xdi->count++; 
+
+           /* default bios params to most commonly used values */
+           diskinfo[0] = 0x40;
+           diskinfo[1] = 0x20;
+           diskinfo[2] = (sd->capacity) >> 11;
+           
+           /* override with calculated, extended default,
+              or driver values */
+           /* XXX SMH: gross in-line literal major number. XXX FIXME. */
+           if(sd->device->host->hostt->bios_param != NULL)
+               sd->device->host->hostt->bios_param(
+                   sd, MKDEV(SCSI_DISK0_MAJOR, 0), &diskinfo[0]);
+           else scsicam_bios_param(sd, MKDEV(SCSI_DISK0_MAJOR, 0), 
+                                   &diskinfo[0]);
+
+           
+           printk (KERN_ALERT "SCSI-XENO %d\n", xen_xdi->count - base);
+           printk (KERN_ALERT "  capacity 0x%x\n", sd->capacity);
+           printk (KERN_ALERT "  head     0x%x\n", diskinfo[0]);
+           printk (KERN_ALERT "  sector   0x%x\n", diskinfo[1]);
+           printk (KERN_ALERT "  cylinder 0x%x\n", diskinfo[2]);
+
+
+       }
+    }
+
+    unmap_domain_mem(xen_xdi);
+
+    return; 
+}      
+
+
+
+
 static int sd_detect(Scsi_Device * SDp)
 {
     if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
@@ -1463,6 +1530,8 @@ static void sd_detach(Scsi_Device * SDp)
 
 static int __init init_sd(void)
 {
+    extern int scsi_register_module(int, void *);
+
     sd_template.module = THIS_MODULE;
     return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
 }
index 7d7205b69aef1a9bb3525f684c4889ee606bafa4..08cc5cb0dd4d4e81133f3eded1076724db6bbcb2 100644 (file)
@@ -32,6 +32,7 @@
 #define CONFIG_BLK_DEV_PIIX 1
 
 #define CONFIG_SCSI 1
+#define CONFIG_SCSI_LOGGING 1
 #define CONFIG_BLK_DEV_SD 1
 #define CONFIG_SD_EXTRA_DEVS 40
 #define CONFIG_SCSI_MULTI_LUN 1
index 74a0c6c5654d8d2e959337db3233b9c9326f4e55..56413ed106cde1155d20e590395b607dec7d0662 100644 (file)
@@ -1,3 +1,3 @@
 O_TARGET := blk.o
-obj-y := xl_block.o xl_block_test.o
+obj-y := xl_block.o xl_ide.o xl_scsi.o xl_block_test.o
 include $(TOPDIR)/Rules.make
index 0b77e5536e6570dc1e7c31e3685812488804b526..3130280c57f8a7805066e36f93a33e5c34a450c7 100644 (file)
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
-#define MAJOR_NR XLBLK_MAJOR   /* force defns in blk.h, must precede include */
-static int xlblk_major = XLBLK_MAJOR;
 #include <linux/blk.h>
 
 /* Copied from linux/ide.h */
 typedef unsigned char  byte; 
 
-void xlblk_ide_register_disk(int, unsigned long);
 
-#define XLBLK_MAX 32 /* Maximum minor devices we support */
-#define XLBLK_MAJOR_NAME "xhd"
-#define IDE_PARTN_BITS 6                           /* from ide.h::PARTN_BITS */
-#define IDE_PARTN_MASK ((1<<IDE_PARTN_BITS)-1)     /* from ide.h::PARTN_MASK */
-static int xlblk_blk_size[XLBLK_MAX];
-static int xlblk_blksize_size[XLBLK_MAX];
-static int xlblk_read_ahead; 
-static int xlblk_hardsect_size[XLBLK_MAX];
-static int xlblk_max_sectors[XLBLK_MAX];
+extern int  xlide_init(int xidx, int idx); 
+extern int  xlide_hwsect(int minor); 
+extern void xlide_cleanup(void); 
+extern int  xlscsi_init(int xidx, int idx);
+extern int  xlscsi_hwsect(int minor); 
+extern void xlscsi_cleanup(void); 
+
+static int nide = 0;    // number of IDE devices we have 
+static int nscsi = 0;   // number of SCSI devices we have 
 
-#define XLBLK_RESPONSE_IRQ _EVENT_BLK_RESP
 
+#define XLBLK_MAX 32 /* XXX SMH: this is the max of XLIDE_MAX and XLSCSI_MAX */
+
+#define XLBLK_RESPONSE_IRQ _EVENT_BLK_RESP
 #define DEBUG_IRQ    _EVENT_DEBUG 
 
 #if 0
@@ -57,7 +56,8 @@ static int xlblk_max_sectors[XLBLK_MAX];
 
 static blk_ring_t *blk_ring;
 static unsigned int resp_cons; /* Response consumer for comms ring. */
-static xen_disk_info_t xen_disk_info;
+
+xen_disk_info_t xen_disk_info;
 
 int hypervisor_request(void *         id,
                        int            operation,
@@ -70,52 +70,136 @@ int hypervisor_request(void *         id,
 /* ------------------------------------------------------------------------
  */
 
-static int xenolinux_block_open(struct inode *inode, struct file *filep)
+/* Convert from a XenoLinux (major,minor) to the Xen-level 'physical' device */
+static kdev_t xldev_to_physdev(kdev_t xldev) 
+{
+    int xlmajor = MAJOR(xldev); 
+    int major, minor; 
+
+    switch(xlmajor) { 
+    case XLIDE_MAJOR: 
+       major = IDE0_MAJOR; 
+       minor = 0; /* we do minor offsetting manually by addition */
+       break; 
+       
+    case XLSCSI_MAJOR: 
+       major = SCSI_DISK0_MAJOR; 
+       minor = 0; /* we do minor offsetting manually by addition */
+       break; 
+
+    default: 
+       panic("xldev_to_physdev: unhandled major %d\n", xlmajor); 
+       break; 
+    } 
+
+    return MKDEV(major, minor); 
+}
+
+
+/*
+** Locate the gendisk structure associated with a particular xenolinux disk; 
+** this requires a scan of the xen_disk_info[] array currently which kind of
+** sucks. However we can clean this whole area up later (i.e. post SOSP). 
+*/
+struct gendisk *xldev_to_gendisk(kdev_t xldev, int *t) 
+{
+    int i, j, posn, type; 
+
+    switch(MAJOR(xldev)) { 
+       
+    case XLIDE_MAJOR: 
+       type = 1; 
+       posn = 1; 
+       break; 
+       
+    case XLSCSI_MAJOR: 
+       type = 2; 
+       posn = 1; 
+       break; 
+
+    default: 
+       panic("xldev_to_gendisk: unhandled major %d\n", MAJOR(xldev)); 
+       break; 
+    } 
+
+
+    for ( i = j = 0; i < xen_disk_info.count; i++ ) {
+       if(xen_disk_info.disks[i].type == type)
+           if(++j == posn)
+               break; 
+    }
+
+    if(t) 
+       *t = type; 
+
+    return (xen_disk_info.disks[i].gendisk); 
+}
+
+int xenolinux_block_open(struct inode *inode, struct file *filep)
 {
     DPRINTK("xenolinux_block_open\n"); 
     return 0;
 }
 
-static int xenolinux_block_release(struct inode *inode, struct file *filep)
+int xenolinux_block_release(struct inode *inode, struct file *filep)
 {
     DPRINTK("xenolinux_block_release\n");
     return 0;
 }
 
-static int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
+
+
+int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
                          unsigned command, unsigned long argument)
 {
-    int minor_dev;
+    int minor_dev, type;
     struct hd_geometry *geo = (struct hd_geometry *)argument;
-
+    struct gendisk *gd;     
+    struct hd_struct *part; 
+    
     DPRINTK("xenolinux_block_ioctl\n"); 
 
     /* check permissions */
     if (!capable(CAP_SYS_ADMIN)) return -EPERM;
     if (!inode)                  return -EINVAL;
+
     minor_dev = MINOR(inode->i_rdev);
     if (minor_dev >= XLBLK_MAX)  return -ENODEV;
     
     DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, minor: 0x%x\n",
                   command, (long) argument, minor_dev); 
   
+    gd = xldev_to_gendisk(inode->i_rdev, &type); 
+    part = &gd->part[minor_dev]; 
+
     switch (command)
     {
     case BLKGETSIZE:
-        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, 
-                      (long) xen_disk_info.disks[0].capacity); 
-       return put_user(xen_disk_info.disks[0].capacity, 
-                       (unsigned long *) argument);
+        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
+       return put_user(part->nr_sects, (unsigned long *) argument);
 
     case BLKRRPART:
         DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART); 
        break;
 
     case BLKSSZGET:
-        DPRINTK_IOCTL("   BLKSSZGET: %x 0x%x\n", BLKSSZGET,
-                      xlblk_hardsect_size[minor_dev]);
-       return xlblk_hardsect_size[minor_dev]; 
-
+       switch(type) {
+       case 1: 
+           DPRINTK_IOCTL("   BLKSSZGET: %x 0x%x\n", BLKSSZGET, 
+                         xlide_hwsect(minor_dev));
+           return xlide_hwsect(minor_dev); 
+           break; 
+       case 2: 
+           DPRINTK_IOCTL("   BLKSSZGET: %x 0x%x\n", BLKSSZGET,
+                         xlscsi_hwsect(minor_dev));
+           return xlscsi_hwsect(minor_dev); 
+           break; 
+
+       default: 
+           printk("BLKSSZGET ioctl() on bogus type %d disk!\n", type); 
+           return 0; 
+
+       }
     case HDIO_GETGEO:
         DPRINTK_IOCTL("   HDIO_GETGEO: %x\n", HDIO_GETGEO);
        if (!argument) return -EINVAL;
@@ -143,13 +227,13 @@ static int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
     return 0;
 }
 
-static int xenolinux_block_check(kdev_t dev)
+int xenolinux_block_check(kdev_t dev)
 {
     DPRINTK("xenolinux_block_check\n");
     return 0;
 }
 
-static int xenolinux_block_revalidate(kdev_t dev)
+int xenolinux_block_revalidate(kdev_t dev)
 {
     DPRINTK("xenolinux_block_revalidate\n"); 
     return 0;
@@ -200,14 +284,13 @@ int hypervisor_request(void *         id,
 
     case XEN_BLOCK_READ:
     case XEN_BLOCK_WRITE:
-       if ( MAJOR(device) != XLBLK_MAJOR ) 
-           panic("error: xl_block::hypervisor_request: "
-                  "unknown device [0x%x]\n", device);
-        phys_device = MKDEV(IDE0_MAJOR, 0);
+
+        phys_device =  xldev_to_physdev(device); 
+
        /* Compute real buffer location on disk */
        sector_number = block_number;
-       if ( (gd = (struct gendisk *)xen_disk_info.disks[0].gendisk) != NULL )
-           sector_number += gd->part[MINOR(device)&IDE_PARTN_MASK].start_sect;
+       gd = xldev_to_gendisk(device, NULL); 
+       sector_number += gd->part[MINOR(device)].start_sect;
         break;
 
     default:
@@ -234,7 +317,7 @@ int hypervisor_request(void *         id,
  * do_xlblk_request
  *  read a block; request is in a request queue
  */
-static void do_xlblk_request (request_queue_t *rq)
+void do_xlblk_request (request_queue_t *rq)
 {
     struct request *req;
     struct buffer_head *bh;
@@ -242,9 +325,10 @@ static void do_xlblk_request (request_queue_t *rq)
     
     DPRINTK("xlblk.c::do_xlblk_request for '%s'\n", DEVICE_NAME); 
 
-    while ( !rq->plugged && !QUEUE_EMPTY )
+    while ( !rq->plugged && !list_empty(&rq->queue_head))
     {
-       if ( (req = CURRENT) == NULL ) goto out;
+       if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
+           goto out;
                
         DPRINTK("do_xlblk_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
                 req, req->cmd, req->sector,
@@ -310,7 +394,7 @@ static struct block_device_operations xenolinux_block_fops =
 
 static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
 {
-    int i;
+    int i; 
     unsigned long flags; 
     struct buffer_head *bh;
     
@@ -327,15 +411,24 @@ static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
     resp_cons = i;
 
     /* KAF: We can push work down at this point. We have the lock. */
-    do_xlblk_request(BLK_DEFAULT_QUEUE(MAJOR_NR));
-    
+    for (i = 0; i < xen_disk_info.count; i++) {
+       /*
+       ** XXX SMH: this is pretty broken ... 
+       **     a) should really only kick devs w/ outstanding work 
+       **     b) should cover /all/ devs, not just first IDE & SCSI
+       ** KAF will fix this I'm sure. 
+       */
+       do_xlblk_request(BLK_DEFAULT_QUEUE(IDE0_MAJOR));
+       do_xlblk_request(BLK_DEFAULT_QUEUE(SCSI_DISK0_MAJOR));
+    }
+
     spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
 
 int __init xlblk_init(void)
 {
-    int i, error, result;
+    int i, error;
 
     /* This mapping was created early at boot time. */
     blk_ring = (blk_ring_t *)fix_to_virt(FIX_BLKRING_BASE);
@@ -356,129 +449,57 @@ int __init xlblk_init(void)
         BUG();
     HYPERVISOR_block_io_op();
     while ( blk_ring->resp_prod != 1 ) barrier();
+
     for ( i = 0; i < xen_disk_info.count; i++ )
     { 
+       /* 
+       ** SMH: initialize all the disks we found; this is complicated a 
+       ** bit by the fact that we have both IDE and SCSI disks underneath 
+       */
        printk (KERN_ALERT "  %2d: type: %d, capacity: %ld\n",
                i, xen_disk_info.disks[i].type, 
                xen_disk_info.disks[i].capacity);
-    }
-    
-    SET_MODULE_OWNER(&xenolinux_block_fops);
-    result = register_blkdev(xlblk_major, "block", &xenolinux_block_fops);
-    if (result < 0) {
-       printk (KERN_ALERT "xenolinux block: can't get major %d\n",
-               xlblk_major);
-       return result;
-    }
+       
+       switch(xen_disk_info.disks[i].type) { 
+       case 1: 
+           xlide_init(i, nide++); 
+           break; 
+       case 2: 
+           xlscsi_init(i, nscsi++); 
+           break; 
+       default: 
+           printk("Unknown Xen disk type %d\n", xen_disk_info.disks[i].type);
+           break; 
+       }
 
-    /* initialize global arrays in drivers/block/ll_rw_block.c */
-    for (i = 0; i < XLBLK_MAX; i++) {
-       xlblk_blk_size[i]      = xen_disk_info.disks[0].capacity;
-       xlblk_blksize_size[i]  = 512;
-       xlblk_hardsect_size[i] = 512;
-       xlblk_max_sectors[i]   = 128;
     }
-    xlblk_read_ahead  = 8; 
-
-    blk_size[xlblk_major]      = xlblk_blk_size;
-    blksize_size[xlblk_major]  = xlblk_blksize_size;
-    hardsect_size[xlblk_major] = xlblk_hardsect_size;
-    read_ahead[xlblk_major]    = xlblk_read_ahead; 
-    max_sectors[xlblk_major]   = xlblk_max_sectors;
-
-    blk_init_queue(BLK_DEFAULT_QUEUE(xlblk_major), do_xlblk_request);
-
-    /*
-     * Turn off barking 'headactive' mode. We dequeue buffer heads as
-     * soon as we pass them down to Xen.
-     */
-    blk_queue_headactive(BLK_DEFAULT_QUEUE(xlblk_major), 0);
 
-    xlblk_ide_register_disk(0, xen_disk_info.disks[0].capacity);
-
-    printk(KERN_ALERT 
-          "XenoLinux Virtual Block Device Driver installed [device: %d]\n",
-          xlblk_major);
     return 0;
 
  fail:
     return error;
 }
 
-void xlblk_ide_register_disk(int idx, unsigned long capacity)
-{
-    int units;
-    int minors;
-    struct gendisk *gd;
-
-    /* plagarized from ide-probe.c::init_gendisk */
-    
-    units = 2; /* from ide.h::MAX_DRIVES */
-
-    minors    = units * (1<<IDE_PARTN_BITS);
-    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
-    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
-    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
-    memset(gd->part, 0, minors * sizeof(struct hd_struct));
-    
-    gd->major        = xlblk_major;  
-    gd->major_name   = XLBLK_MAJOR_NAME;
-    gd->minor_shift  = IDE_PARTN_BITS; 
-    gd->max_p       = 1<<IDE_PARTN_BITS;
-    gd->nr_real             = units;           
-    gd->real_devices = NULL;          
-    gd->next        = NULL;            
-    gd->fops         = &xenolinux_block_fops;
-    gd->de_arr       = kmalloc (sizeof *gd->de_arr * units, GFP_KERNEL);
-    gd->flags       = kmalloc (sizeof *gd->flags * units, GFP_KERNEL);
-
-    if (gd->de_arr)  
-       memset (gd->de_arr, 0, sizeof *gd->de_arr * units);
-
-    if (gd->flags) 
-       memset (gd->flags, 0, sizeof *gd->flags * units);
-
-    add_gendisk(gd);
-
-    xen_disk_info.disks[idx].gendisk = gd;
-
-    /* default disk size is just a big number.  in the future, we
-       need a message to probe the devices to determine the actual size */
-    register_disk(gd, MKDEV(xlblk_major, 0), 1<<IDE_PARTN_BITS,
-                 &xenolinux_block_fops, capacity);
-
-    return;
-}
-
-
 
 static void __exit xlblk_cleanup(void)
 {
-    /* CHANGE FOR MULTIQUEUE */
-    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlblk_major));
-
-    /* clean up global arrays */
-    read_ahead[xlblk_major] = 0;
+    int i; 
 
-    if (blk_size[xlblk_major]) 
-       kfree(blk_size[xlblk_major]);
-    blk_size[xlblk_major] = NULL;
-
-    if (blksize_size[xlblk_major]) 
-       kfree(blksize_size[xlblk_major]);
-    blksize_size[xlblk_major] = NULL;
+    for ( i = 0; i < xen_disk_info.count; i++ )
+    { 
+       switch(xen_disk_info.disks[i].type) { 
+       case 1: 
+           xlide_cleanup(); 
+           break; 
+       case 2: 
+           xlscsi_cleanup(); 
+           break; 
+       default: 
+           printk("Unknown Xen disk type %d\n", xen_disk_info.disks[i].type);
+           break; 
+       }
 
-    if (hardsect_size[xlblk_major]) 
-       kfree(hardsect_size[xlblk_major]);
-    hardsect_size[xlblk_major] = NULL;
-    
-    /* XXX: free each gendisk */
-    if (unregister_blkdev(xlblk_major, "block"))
-       printk(KERN_ALERT
-              "XenoLinux Virtual Block Device Driver uninstalled w/ errs\n");
-    else
-       printk(KERN_ALERT 
-              "XenoLinux Virtual Block Device Driver uninstalled\n");
+    }
 
     return;
 }
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
new file mode 100644 (file)
index 0000000..50f8003
--- /dev/null
@@ -0,0 +1,200 @@
+/******************************************************************************
+ * xl_ide.c
+ * 
+ * Xenolinux virtual IDE block-device driver.
+ * 
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+
+#define MAJOR_NR XLIDE_MAJOR   /* force defns in blk.h, must precede include */
+static int xlide_major = XLIDE_MAJOR;
+#include <linux/blk.h>
+
+void xlide_ide_register_disk(int, unsigned long);
+
+#define XLIDE_MAX 32 /* Maximum minor devices we support */
+#define XLIDE_MAJOR_NAME "xhd"
+#define IDE_PARTN_BITS 6                           /* from ide.h::PARTN_BITS */
+#define IDE_PARTN_MASK ((1<<IDE_PARTN_BITS)-1)     /* from ide.h::PARTN_MASK */
+static int xlide_blk_size[XLIDE_MAX];
+static int xlide_blksize_size[XLIDE_MAX];
+static int xlide_read_ahead; 
+static int xlide_hardsect_size[XLIDE_MAX];
+static int xlide_max_sectors[XLIDE_MAX];
+
+extern xen_disk_info_t xen_disk_info;
+
+
+extern int xenolinux_block_open(struct inode *inode, struct file *filep);
+extern int xenolinux_block_release(struct inode *inode, struct file *filep);
+extern int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
+                                unsigned command, unsigned long argument);
+extern int xenolinux_block_check(kdev_t dev);
+extern int xenolinux_block_revalidate(kdev_t dev);
+
+
+extern void do_xlblk_request (request_queue_t *rq); 
+
+
+/*
+ * File operations for the virtual IDE block device.  All entry points
+ * are the shared xenolinux_block_* handlers declared above (implemented
+ * in the common xl_block layer); this struct only binds them to the
+ * XLIDE major.
+ */
+static struct block_device_operations xlide_block_fops = 
+{
+    open:               xenolinux_block_open,
+    release:            xenolinux_block_release,
+    ioctl:              xenolinux_block_ioctl,
+    check_media_change: xenolinux_block_check,
+    revalidate:         xenolinux_block_revalidate,
+};
+
+
+/* Tiny interface fn: report the hardware sector size configured for
+ * the given minor device (filled in by xlide_init). */
+int xlide_hwsect(int minor) 
+{
+    int sect_sz = xlide_hardsect_size[minor];
+    return sect_sz;
+} 
+
+
+/*
+** Allocate and register a gendisk for the IDE device at xen_disk_info
+** slot 'xidx'.  'idx' is the per-type device count; it is not used in
+** the body yet (see the XXX notes -- major/name should become
+** idx-specific).
+** NOTE(review): the kmalloc() results for gd, gd->sizes and gd->part
+** are dereferenced without NULL checks; an allocation failure at init
+** time would oops -- confirm whether that is acceptable here.
+*/
+void xlide_register_disk(int xidx, int idx)
+{
+    int units;
+    int minors;
+    struct gendisk *gd;
+
+    /* plagiarized from ide-probe.c::init_gendisk */
+    units = 2; /* from ide.h::MAX_DRIVES */
+
+    minors    = units * (1<<IDE_PARTN_BITS);
+    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
+    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
+    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
+    memset(gd->part, 0, minors * sizeof(struct hd_struct));
+    
+    gd->major        = xlide_major;         /* XXX should be idx-specific */
+    gd->major_name   = XLIDE_MAJOR_NAME;    /* XXX should be idx-specific */
+    gd->minor_shift  = IDE_PARTN_BITS; 
+    gd->max_p       = 1<<IDE_PARTN_BITS;
+    gd->nr_real             = units;           
+    gd->real_devices = NULL;          
+    gd->next        = NULL;            
+    gd->fops         = &xlide_block_fops;
+    gd->de_arr       = kmalloc (sizeof *gd->de_arr * units, GFP_KERNEL);
+    gd->flags       = kmalloc (sizeof *gd->flags * units, GFP_KERNEL);
+
+    /* de_arr/flags are optional: zero them only if allocation worked */
+    if (gd->de_arr)  
+       memset (gd->de_arr, 0, sizeof *gd->de_arr * units);
+
+    if (gd->flags) 
+       memset (gd->flags, 0, sizeof *gd->flags * units);
+
+    add_gendisk(gd);
+
+    /* remember the gendisk so the common layer / cleanup can find it */
+    xen_disk_info.disks[xidx].gendisk = gd;
+
+    /* XXX major should be idx-specific */
+    register_disk(gd, MKDEV(xlide_major, 0), 1<<IDE_PARTN_BITS, 
+                 &xlide_block_fops, xen_disk_info.disks[xidx].capacity);
+
+    return;
+}
+
+
+
+/*
+** Initialize a XenoLinux IDE disk; the 'xidx' is the index into the 
+** xen_disk_info array so we can grab interesting values; the 'idx' is 
+** a count of the number of XLIDE disks we've seen so far, starting at 0.
+** Returns 0 on success or the negative register_blkdev() error code.
+** XXX SMH: this is all so ugly because the xen_disk_info() structure and 
+** array doesn't really give us what we want. Ho hum. To be tidied someday. 
+*/
+int xlide_init(int xidx, int idx) 
+{
+    int i, major, result;
+
+    SET_MODULE_OWNER(&xlide_block_fops);
+
+    major  = xlide_major + idx;  /* XXX assume we have a linear major space */
+
+    /* XXX SMH: name below should vary with major */
+    result = register_blkdev(major, XLIDE_MAJOR_NAME, &xlide_block_fops);
+    if (result < 0) {
+       printk (KERN_ALERT "XL IDE: can't get major %d\n",
+               major);
+       return result;
+    }
+
+    /* initialize global arrays in drivers/block/ll_rw_block.c */
+    for (i = 0; i < XLIDE_MAX; i++) {
+       /* Use the capacity of the disk being initialised (xidx), not
+        * disk 0 -- with several disks the capacities can differ. */
+       xlide_blk_size[i]      = xen_disk_info.disks[xidx].capacity;
+       xlide_blksize_size[i]  = 512;
+       xlide_hardsect_size[i] = 512;
+       xlide_max_sectors[i]   = 128;
+    }
+    xlide_read_ahead  = 8; 
+
+    /* NOTE(review): these file-static tables are shared, so a second
+     * device (idx > 0, different major) would clobber the first one's
+     * entries -- needs per-major tables to really support idx > 0. */
+    blk_size[major]      = xlide_blk_size;
+    blksize_size[major]  = xlide_blksize_size;
+    hardsect_size[major] = xlide_hardsect_size;
+    read_ahead[major]    = xlide_read_ahead; 
+    max_sectors[major]   = xlide_max_sectors;
+
+    blk_init_queue(BLK_DEFAULT_QUEUE(major), do_xlblk_request);
+
+    /*
+     * Turn off barking 'headactive' mode. We dequeue buffer heads as
+     * soon as we pass them down to Xen.
+     */
+    blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
+
+    xlide_register_disk(xidx, idx); 
+
+    printk(KERN_ALERT 
+          "XenoLinux Virtual IDE Device Driver installed [device: %d]\n",
+          major);
+
+    return 0;
+}
+
+
+/*
+** Tear down the virtual IDE driver: release the request queue, unhook
+** the global ll_rw_block tables and unregister the block device.
+** NOTE(review): xlide_init() registers major 'xlide_major + idx', but
+** this only cleans up 'xlide_major' itself -- verify for idx > 0.
+*/
+void xlide_cleanup(void)
+{
+    /* CHANGE FOR MULTIQUEUE */
+    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlide_major));
+
+    /*
+     * Unhook the global arrays in drivers/block/ll_rw_block.c.  These
+     * were pointed at the file-static xlide_* arrays by xlide_init(),
+     * NOT at kmalloc'd memory, so they must not be kfree'd here --
+     * kfree() on a static array corrupts the allocator.
+     */
+    read_ahead[xlide_major]    = 0;
+    blk_size[xlide_major]      = NULL;
+    blksize_size[xlide_major]  = NULL;
+    hardsect_size[xlide_major] = NULL;
+    max_sectors[xlide_major]   = NULL;
+
+    /* XXX: free each gendisk */
+    if (unregister_blkdev(xlide_major, XLIDE_MAJOR_NAME))
+       printk(KERN_ALERT
+              "XenoLinux Virtual IDE Device Driver uninstalled w/ errs\n");
+    else
+       printk(KERN_ALERT 
+              "XenoLinux Virtual IDE Device Driver uninstalled\n");
+
+    return;
+}
+
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
new file mode 100644 (file)
index 0000000..3ee8650
--- /dev/null
@@ -0,0 +1,211 @@
+/******************************************************************************
+ * xl_scsi.c
+ * 
+ * Xenolinux virtual SCSI block-device driver.
+ * 
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+
+#define MAJOR_NR XLSCSI_MAJOR   /* force defns in blk.h, must precede include */
+static int xlscsi_major = XLSCSI_MAJOR;
+#include <linux/blk.h>
+
+/* Copied from linux/ide.h */
+typedef unsigned char  byte; 
+
+void xlscsi_ide_register_disk(int, unsigned long);
+
+#define SCSI_DISKS_PER_MAJOR 16    /* max number of devices per scsi major */
+#define XLSCSI_MAX 32              /* maximum minor devices we support */
+#define XLSCSI_MAJOR_NAME "xsd"
+
+static int xlscsi_blk_size[XLSCSI_MAX];
+static int xlscsi_blksize_size[XLSCSI_MAX];
+static int xlscsi_read_ahead; 
+static int xlscsi_hardsect_size[XLSCSI_MAX];
+static int xlscsi_max_sectors[XLSCSI_MAX];
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+extern xen_disk_info_t xen_disk_info;
+
+extern int xenolinux_block_open(struct inode *inode, struct file *filep);
+extern int xenolinux_block_release(struct inode *inode, struct file *filep);
+extern int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
+                                unsigned command, unsigned long argument);
+extern int xenolinux_block_check(kdev_t dev);
+extern int xenolinux_block_revalidate(kdev_t dev);
+
+
+extern void do_xlblk_request (request_queue_t *rq); 
+
+/*
+ * File operations for the virtual SCSI block device.  All entry points
+ * are the shared xenolinux_block_* handlers declared above (implemented
+ * in the common xl_block layer); this struct only binds them to the
+ * XLSCSI major.
+ */
+static struct block_device_operations xlscsi_block_fops = 
+{
+    open:               xenolinux_block_open,
+    release:            xenolinux_block_release,
+    ioctl:              xenolinux_block_ioctl,
+    check_media_change: xenolinux_block_check,
+    revalidate:         xenolinux_block_revalidate,
+};
+
+
+/* Tiny interface fn: report the hardware sector size configured for
+ * the given minor device (filled in by xlscsi_init). */
+int xlscsi_hwsect(int minor) 
+{
+    int sect_sz = xlscsi_hardsect_size[minor];
+    return sect_sz;
+} 
+
+
+/*
+** Allocate and register a gendisk for the SCSI device at xen_disk_info
+** slot 'xidx'.  'idx' counts XLSCSI devices seen so far; anything > 0
+** needs proper multi-major support (see the warning below).
+** NOTE(review): the kmalloc() results for gd, gd->sizes and gd->part
+** are dereferenced without NULL checks; an allocation failure at init
+** time would oops -- confirm whether that is acceptable here.
+*/
+void xlscsi_register_disk(int xidx, int idx)
+{
+    int minors;
+    struct gendisk *gd;
+
+    minors    = XLSCSI_MAX; 
+    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
+    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
+    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
+    memset(gd->part, 0, minors * sizeof(struct hd_struct));
+    
+    if(idx > 0) 
+       printk("xlscsi_register_disk: need fix to handle "
+              "multiple SCSI majors!\n"); 
+    
+    gd->major        = xlscsi_major;       /* XXX should be idx-specific */
+    gd->major_name   = XLSCSI_MAJOR_NAME;  /* XXX should be idx-specific */
+    gd->minor_shift  = 4; 
+    gd->max_p       = 1<<4; 
+    gd->nr_real             = SCSI_DISKS_PER_MAJOR; 
+    gd->real_devices = NULL;          
+    gd->next        = NULL;            
+    gd->fops         = &xlscsi_block_fops;
+    gd->de_arr       = kmalloc (sizeof *gd->de_arr * SCSI_DISKS_PER_MAJOR, 
+                               GFP_KERNEL);
+    gd->flags       = kmalloc (sizeof *gd->flags * SCSI_DISKS_PER_MAJOR, 
+                               GFP_KERNEL);
+
+    /* de_arr/flags are optional: zero them only if allocation worked */
+    if (gd->de_arr)  
+       memset (gd->de_arr, 0, sizeof *gd->de_arr * SCSI_DISKS_PER_MAJOR);
+
+    if (gd->flags) 
+       memset (gd->flags, 0, sizeof *gd->flags * SCSI_DISKS_PER_MAJOR);
+
+    add_gendisk(gd);
+
+    /* remember the gendisk so the common layer / cleanup can find it */
+    xen_disk_info.disks[xidx].gendisk = gd;
+
+    /* XXX major below should be idx-specific */
+    register_disk(gd, MKDEV(xlscsi_major, 0), 1<<4, &xlscsi_block_fops, 
+                 xen_disk_info.disks[xidx].capacity);
+
+    return;
+}
+
+
+/*
+** Initialize a XenoLinux SCSI disk; the 'xidx' is the index into the 
+** xen_disk_info array so we can grab interesting values; the 'idx' is 
+** a count of the number of XLSCSI disks we've seen so far, starting at 0.
+** Returns 0 on success or the negative register_blkdev() error code.
+** XXX SMH: this is all so ugly because the xen_disk_info() structure and 
+** array doesn't really give us what we want. Ho hum. To be tidied someday. 
+*/
+int xlscsi_init(int xidx, int idx)
+{
+    int i, major, result;
+
+    SET_MODULE_OWNER(&xlscsi_block_fops);
+
+    major  = xlscsi_major + idx;   /* XXX assume we have linear major space */
+    
+    /* XXX SMH: 'name' below should vary for different major values */
+    result = register_blkdev(major, XLSCSI_MAJOR_NAME, &xlscsi_block_fops);
+
+    if (result < 0) {
+       printk (KERN_ALERT "XL SCSI: can't get major %d\n", major);
+       return result;
+    }
+
+    /* initialize global arrays in drivers/block/ll_rw_block.c */
+    for (i = 0; i < XLSCSI_MAX; i++) {
+       xlscsi_blk_size[i]      = xen_disk_info.disks[xidx].capacity;
+       xlscsi_blksize_size[i]  = 512;
+       xlscsi_hardsect_size[i] = 512;
+       xlscsi_max_sectors[i]   = 128;
+    }
+    xlscsi_read_ahead  = 8; 
+
+    /* NOTE(review): these file-static tables are shared, so a second
+     * device (idx > 0, different major) would clobber the first one's
+     * entries -- needs per-major tables to really support idx > 0. */
+    blk_size[major]      = xlscsi_blk_size;
+    blksize_size[major]  = xlscsi_blksize_size;
+    hardsect_size[major] = xlscsi_hardsect_size;
+    read_ahead[major]    = xlscsi_read_ahead; 
+    max_sectors[major]   = xlscsi_max_sectors;
+
+    blk_init_queue(BLK_DEFAULT_QUEUE(major), do_xlblk_request);
+
+    /*
+     * Turn off barking 'headactive' mode. We dequeue buffer heads as
+     * soon as we pass them down to Xen.
+     */
+    blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
+    
+    xlscsi_register_disk(xidx, idx);
+
+    printk(KERN_ALERT 
+          "XenoLinux Virtual SCSI Device Driver installed [device: %d]\n",
+          major);
+    return 0;
+}
+
+
+
+/*
+** Tear down the virtual SCSI driver: release the request queue, unhook
+** the global ll_rw_block tables and unregister the block device.
+** NOTE(review): xlscsi_init() registers major 'xlscsi_major + idx', but
+** this only cleans up 'xlscsi_major' itself -- verify for idx > 0.
+*/
+void xlscsi_cleanup(void)
+{
+    /* CHANGE FOR MULTIQUEUE */
+    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlscsi_major));
+
+    /*
+     * Unhook the global arrays in drivers/block/ll_rw_block.c.  These
+     * were pointed at the file-static xlscsi_* arrays by xlscsi_init(),
+     * NOT at kmalloc'd memory, so they must not be kfree'd here --
+     * kfree() on a static array corrupts the allocator.
+     */
+    read_ahead[xlscsi_major]    = 0;
+    blk_size[xlscsi_major]      = NULL;
+    blksize_size[xlscsi_major]  = NULL;
+    hardsect_size[xlscsi_major] = NULL;
+    max_sectors[xlscsi_major]   = NULL;
+
+    /* XXX: free each gendisk */
+    if (unregister_blkdev(xlscsi_major, XLSCSI_MAJOR_NAME))
+       printk(KERN_ALERT
+              "XenoLinux Virtual SCSI Device Driver uninstalled w/ errs\n");
+    else
+       printk(KERN_ALERT 
+              "XenoLinux Virtual SCSI Device Driver uninstalled\n");
+
+    return;
+}
+
index dfcb6f79f0ae73fa794cc49b6c5ca9cb6f2bd8a5..a838d477d1cda1e8af57081d84b6a33ca2aa2093 100644 (file)
 
 #define        UMEM_MAJOR      116     /* http://www.umem.com/ Battery Backed RAM */
 
-#define XLBLK_MAJOR    123     /* XenoLinux Block Device */
+#define XLIDE_MAJOR    123     /* XenoLinux IDE Device */
+#define XLSCSI_MAJOR   133     /* XenoLinux SCSI Device */
 
 #define RTF_MAJOR      150
 #define RAW_MAJOR      162
index b0b143a264c7872fad0caa1de7f65114b2edced9..68c1a4a002cf47fd1c835cfc35213c141fd1ece1 100644 (file)
@@ -232,9 +232,7 @@ static struct dev_name_struct {
        { "ataraid/d15p",0x72F0 },
 #if defined(CONFIG_XENOLINUX_BLOCK)
         { "xhda",    0x7B00 },
-        { "xhdb",    0x7C00 },
-        { "xhdc",    0x7D00 },
-        { "xhdd",    0x7E00 },
+        { "xsda",    0x8500 },
 #endif
        { "nftla", 0x5d00 },
        { "nftlb", 0x5d10 },